- reorganized repository to match standard layout
diff --git a/scons-tools/doxygen.py b/scons-tools/doxygen.py
new file mode 100644
index 0000000..f85f4a3
--- /dev/null
+++ b/scons-tools/doxygen.py
@@ -0,0 +1,209 @@
+# Known issue:
+# The emitter depends on the Doxyfile, which is generated from doxyfile.in,
+# so the build fails after cleaning and relaunching the build.
+
+import os
+import os.path
+import glob
+from fnmatch import fnmatch
+
+def DoxyfileParse(file_contents):
+    """
+    Parse a Doxygen source file and return a dictionary of all the values.
+    Values will be strings and lists of strings.
+    """
+    data = {}
+
+    import shlex
+    lex = shlex.shlex(instream = file_contents, posix = True)
+    lex.wordchars += "*+./-:"
+    lex.whitespace = lex.whitespace.replace("\n", "")
+    lex.escape = ""
+
+    token = lex.get_token()
+    key = token     # the first token should be a key
+    last_token = ""
+    key_token = False
+    new_data = True
+
+    def append_data(data, key, new_data, token):
+        if new_data or len(data[key]) == 0:
+            data[key].append(token)
+        else:
+            data[key][-1] += token
+
+    while token:
+        if token in ['\n']:
+            if last_token not in ['\\']:
+                key_token = True
+        elif token in ['\\']:
+            pass
+        elif key_token:
+            key = token
+            key_token = False
+        else:
+            if token == "+=":
+                if key not in data:
+                    data[key] = list()
+            elif token == "=":
+                data[key] = list()
+            else:
+                append_data( data, key, new_data, token )
+                new_data = True
+
+        last_token = token
+        token = lex.get_token()
+
+        if last_token == '\\' and token != '\n':
+            new_data = False
+            append_data( data, key, new_data, '\\' )
+
+    # drop empty values, and compress lists of length 1 into single strings
+    for (k, v) in data.items():
+        if len(v) == 0:
+            data.pop(k)
+            continue
+
+        # items in the following list will be kept as lists and not converted to strings
+        if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
+            continue
+
+        if len(v) == 1:
+            data[k] = v[0]
+
+    return data
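+
+# Illustrative sketch of the parser's output (comment only, not executed):
+# given a Doxyfile containing "PROJECT_NAME = MyProject" and "INPUT = src doc",
+# DoxyfileParse would return {'PROJECT_NAME': 'MyProject', 'INPUT': ['src', 'doc']};
+# INPUT stays a list, while single-value keys are compressed to plain strings.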
+
+def DoxySourceScan(node, env, path):
+    """
+    Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
+    any files used to generate docs to the list of source files.
+    """
+    default_file_patterns = [
+        '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
+        '*.ipp', '*.i++', '*.inl', '*.h', '*.hh', '*.hxx', '*.hpp', '*.h++',
+        '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
+        '*.py',
+    ]
+
+    default_exclude_patterns = [
+        '*~',
+    ]
+
+    sources = []
+
+    data = DoxyfileParse(node.get_contents())
+
+    recursive = data.get("RECURSIVE", "NO") == "YES"
+
+    file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
+    exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
+
+    doxyfile_dir = str( node.dir )
+
+    for entry in data.get("INPUT", []):
+        entry_real_path = os.path.normpath( os.path.join( doxyfile_dir, entry ) )
+        if os.path.isfile(entry_real_path):
+            sources.append(entry)
+        elif os.path.isdir(entry_real_path):
+            if recursive:
+                for root, dirs, files in os.walk(entry):
+                    for f in files:
+                        filename = os.path.join(root, f)
+
+                        # keep files that match any file pattern...
+                        pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
+                        # ...unless they also match any exclude pattern
+                        exclude_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), exclude_patterns, False)
+
+                        if pattern_check and not exclude_check:
+                            sources.append(filename)
+            else:
+                for pattern in file_patterns:
+                    sources.extend(glob.glob(os.path.join( entry, pattern )))
+    sources = map( lambda path: env.File(path), sources )
+    return sources
+
+
+def DoxySourceScanCheck(node, env):
+    """Check if we should scan this file"""
+    return os.path.isfile(node.path)
+
+def DoxyEmitter(source, target, env):
+    """Doxygen Doxyfile emitter"""
+    # possible output formats and their default values and output locations
+    output_formats = {
+        "HTML": ("YES", "html"),
+        "LATEX": ("YES", "latex"),
+        "RTF": ("NO", "rtf"),
+        "MAN": ("NO", "man"),
+        "XML": ("NO", "xml"),
+    }
+
+    data = DoxyfileParse(source[0].get_contents())
+
+    targets = []
+    out_dir = data.get("OUTPUT_DIRECTORY", ".")
+
+    # add our output locations
+    for (k, v) in output_formats.items():
+        if data.get("GENERATE_" + k, v[0]) == "YES":
+            targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
+
+    # don't clobber targets
+    for node in targets:
+        env.Precious(node)
+
+    # set up cleaning stuff
+    for node in targets:
+        clean_cmd = env.Clean(node, node)
+        env.Depends( clean_cmd, source )
+
+    return (targets, source)
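+
+# Illustrative sketch (comment only): with "OUTPUT_DIRECTORY = doc" and
+# "GENERATE_HTML = YES" in the Doxyfile, the emitter adds env.Dir('doc/html')
+# to the targets, plus one directory per additional enabled output format.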
+
+def generate(env):
+    """
+    Add builders and construction variables for the
+    Doxygen tool. This is currently for Doxygen 1.4.6.
+    """
+    doxyfile_scanner = env.Scanner(
+        DoxySourceScan,
+        "DoxySourceScan",
+        scan_check = DoxySourceScanCheck,
+    )
+
+    doxyfile_builder = env.Builder(
+        action = env.Action("cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file}",
+                            varlist=['$SOURCES']),
+        emitter = DoxyEmitter,
+        target_factory = env.fs.Entry,
+        single_source = True,
+        source_scanner = doxyfile_scanner,
+    )
+
+    env.Append(BUILDERS = {
+        'Doxygen': doxyfile_builder,
+    })
+
+    env.AppendUnique(
+        DOXYGEN = 'doxygen',
+    )
+
+def exists(env):
+    """
+    Make sure doxygen exists.
+    """
+    return env.Detect("doxygen")
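+
+# Example SConstruct usage, a sketch only (the tool path and the Doxyfile
+# location are illustrative, not part of this tool):
+#   env = Environment(tools=['default', 'doxygen'], toolpath=['scons-tools'])
+#   env.Doxygen('doc/doxyfile')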
diff --git a/scons-tools/srcdist.py b/scons-tools/srcdist.py
new file mode 100644
index 0000000..cfc5407
--- /dev/null
+++ b/scons-tools/srcdist.py
@@ -0,0 +1,26 @@
+import targz
+
+def srcDistEmitter(source, target, env):
+    """SrcDist emitter: pass the target and source lists through unchanged."""
+    return (target, source)
+
+def generate(env):
+    """
+    Add builders and construction variables for the
+    SrcDist tool.
+    """
+
+    srcdist_builder = targz.makeBuilder( srcDistEmitter )
+
+    env['BUILDERS']['SrcDist'] = srcdist_builder
+
+def exists(env):
+    """
+    Make sure srcdist exists.
+    """
+    return True
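+
+# Example SConstruct usage, a sketch only (names are illustrative). The targz
+# tool must also be loaded so that TARGZ_COM and TARGZ_SUFFIX are defined:
+#   env = Environment(tools=['default', 'targz', 'srcdist'], toolpath=['scons-tools'])
+#   env.SrcDist('mylib-src.tar.gz', ['include', 'src', 'SConstruct'])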
diff --git a/scons-tools/substinfile.py b/scons-tools/substinfile.py
new file mode 100644
index 0000000..2502262
--- /dev/null
+++ b/scons-tools/substinfile.py
@@ -0,0 +1,86 @@
+import re
+from SCons.Script import * # the usual scons stuff you get in a SConscript
+
+def generate(env):
+ """
+ Add builders and construction variables for the
+ SubstInFile tool.
+
+ Adds SubstInFile builder, which substitutes the keys->values of SUBST_DICT
+ from the source to the target.
+ The values of SUBST_DICT first have any construction variables expanded
+ (its keys are not expanded).
+ If a value of SUBST_DICT is a python callable function, it is called and
+ the result is expanded as the value.
+ If there's more than one source and more than one target, each target gets
+ substituted from the corresponding source.
+ """
+ def do_subst_in_file(targetfile, sourcefile, dict):
+ """Replace all instances of the keys of dict with their values.
+ For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
+ then all instances of %VERSION% in the file will be replaced with 1.2345 etc.
+ """
+ try:
+ f = open(sourcefile, 'rb')
+ contents = f.read()
+ f.close()
+ except:
+ raise SCons.Errors.UserError, "Can't read source file %s"%sourcefile
+ for (k,v) in dict.items():
+ contents = re.sub(k, v, contents)
+ try:
+ f = open(targetfile, 'wb')
+ f.write(contents)
+ f.close()
+ except:
+ raise SCons.Errors.UserError, "Can't write target file %s"%targetfile
+ return 0 # success
+
+ def subst_in_file(target, source, env):
+ if not env.has_key('SUBST_DICT'):
+ raise SCons.Errors.UserError, "SubstInFile requires SUBST_DICT to be set."
+ d = dict(env['SUBST_DICT']) # copy it
+ for (k,v) in d.items():
+ if callable(v):
+ d[k] = env.subst(v()).replace('\\','\\\\')
+ elif SCons.Util.is_String(v):
+ d[k] = env.subst(v).replace('\\','\\\\')
+ else:
+ raise SCons.Errors.UserError, "SubstInFile: key %s: %s must be a string or callable"%(k, repr(v))
+ for (t,s) in zip(target, source):
+ return do_subst_in_file(str(t), str(s), d)
+
+ def subst_in_file_string(target, source, env):
+ """This is what gets printed on the console."""
+ return '\n'.join(['Substituting vars from %s into %s'%(str(s), str(t))
+ for (t,s) in zip(target, source)])
+
+ def subst_emitter(target, source, env):
+ """Add dependency from substituted SUBST_DICT to target.
+ Returns original target, source tuple unchanged.
+ """
+ d = env['SUBST_DICT'].copy() # copy it
+ for (k,v) in d.items():
+ if callable(v):
+ d[k] = env.subst(v())
+ elif SCons.Util.is_String(v):
+ d[k]=env.subst(v)
+ Depends(target, SCons.Node.Python.Value(d))
+ return target, source
+
+## env.Append(TOOLS = 'substinfile') # this should be automaticaly done by Scons ?!?
+ subst_action = SCons.Action.Action( subst_in_file, subst_in_file_string )
+ env['BUILDERS']['SubstInFile'] = Builder(action=subst_action, emitter=subst_emitter)
+
+def exists(env):
+ """
+ Make sure tool exists.
+ """
+ return True
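+
+# Example SConstruct usage, a sketch only (file names and keys are illustrative):
+#   env = Environment(tools=['default', 'substinfile'], toolpath=['scons-tools'])
+#   env.SubstInFile('version.h', 'version.h.in',
+#                   SUBST_DICT={'%VERSION%': '1.2345', '%BASE%': 'MyProg'})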
diff --git a/scons-tools/targz.py b/scons-tools/targz.py
new file mode 100644
index 0000000..2f21204
--- /dev/null
+++ b/scons-tools/targz.py
@@ -0,0 +1,89 @@
+"""tarball
+
+Tool-specific initialization for tarball.
+
+"""
+
+## Shell commands for an external, command-based implementation:
+## to unpack on the fly: gunzip < FILE.tar.gz | tar xvf -
+## to pack on the fly:   tar cvf - FILE-LIST | gzip -c > FILE.tar.gz
+
+import os.path
+
+import SCons.Action
+import SCons.Builder
+import SCons.Defaults
+import SCons.Node.FS
+import SCons.Util
+
+try:
+    import gzip
+    import tarfile
+    internal_targz = 1
+except ImportError:
+    internal_targz = 0
+
+TARGZ_DEFAULT_COMPRESSION_LEVEL = 9
+
+if internal_targz:
+    def targz(target, source, env):
+        def archive_name( path ):
+            path = os.path.normpath( os.path.abspath( path ) )
+            common_path = os.path.commonprefix( (base_dir, path) )
+            archive_name = path[len(common_path):]
+            return archive_name
+
+        def visit(tar, dirname, names):
+            for name in names:
+                path = os.path.join(dirname, name)
+                if os.path.isfile(path):
+                    tar.add(path, archive_name(path) )
+
+        compression = env.get('TARGZ_COMPRESSION_LEVEL', TARGZ_DEFAULT_COMPRESSION_LEVEL)
+        base_dir = os.path.normpath( env.get('TARGZ_BASEDIR', env.Dir('.')).abspath )
+        target_path = str(target[0])
+        fileobj = gzip.GzipFile( target_path, 'wb', compression )
+        tar = tarfile.TarFile(os.path.splitext(target_path)[0], 'w', fileobj)
+        for src in source:
+            source_path = str(src)
+            if src.isdir():
+                os.path.walk(source_path, visit, tar)
+            else:
+                tar.add(source_path, archive_name(source_path) ) # filename, arcname
+        tar.close()
+        fileobj.close()
+
+    targzAction = SCons.Action.Action(targz, varlist=['TARGZ_COMPRESSION_LEVEL','TARGZ_BASEDIR'])
+else:
+    targzAction = None
+
+def makeBuilder( emitter = None ):
+    return SCons.Builder.Builder(action = SCons.Action.Action('$TARGZ_COM', '$TARGZ_COMSTR'),
+                                 emitter = emitter,
+                                 source_factory = SCons.Node.FS.Entry,
+                                 source_scanner = SCons.Defaults.DirScanner,
+                                 suffix = '$TARGZ_SUFFIX',
+                                 multi = 1)
+TarGzBuilder = makeBuilder()
+
+def generate(env):
+ """Add Builders and construction variables for zip to an Environment.
+ The following environnement variables may be set:
+ TARGZ_COMPRESSION_LEVEL: integer, [0-9]. 0: no compression, 9: best compression (same as gzip compression level).
+ TARGZ_BASEDIR: base-directory used to determine archive name (this allow archive name to be relative
+ to something other than top-dir).
+ """
+ env['BUILDERS']['TarGz'] = TarGzBuilder
+ env['TARGZ_COM'] = targzAction
+ env['TARGZ_COMPRESSION_LEVEL'] = TARGZ_DEFAULT_COMPRESSION_LEVEL # range 0-9
+ env['TARGZ_SUFFIX'] = '.tar.gz'
+ env['TARGZ_BASEDIR'] = env.Dir('.') # Sources archive name are made relative to that directory.
+
+def exists(env):
+    return internal_targz
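+
+# Example SConstruct usage, a sketch only (paths are illustrative; '.tar.gz' is
+# appended automatically via TARGZ_SUFFIX):
+#   env = Environment(tools=['default', 'targz'], toolpath=['scons-tools'])
+#   env.TarGz('mylib-src', ['include', 'src'])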