# Copyright 2007 The JsonCpp Authors
# Distributed under MIT license, or public domain if desired and
# recognized in your jurisdiction.
# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE

import os
import os.path
from fnmatch import fnmatch
import targz

##def DoxyfileParse(file_contents):
##    """
##    Parse a Doxygen source file and return a dictionary of all the values.
##    Values will be strings and lists of strings.
##    """
##    data = {}
##
##    import shlex
##    lex = shlex.shlex(instream = file_contents, posix = True)
##    lex.wordchars += "*+./-:"
##    lex.whitespace = lex.whitespace.replace("\n", "")
##    lex.escape = ""
##
##    lineno = lex.lineno
##    last_backslash_lineno = lineno
##    token = lex.get_token()
##    key = token  # the first token should be a key
##    last_token = ""
##    key_token = False
##    next_key = False
##    new_data = True
##
##    def append_data(data, key, new_data, token):
##        if new_data or len(data[key]) == 0:
##            data[key].append(token)
##        else:
##            data[key][-1] += token
##
##    while token:
##        if token in ['\n']:
##            if last_token not in ['\\']:
##                key_token = True
##        elif token in ['\\']:
##            pass
##        elif key_token:
##            key = token
##            key_token = False
##        else:
##            if token == "+=":
##                if not data.has_key(key):
##                    data[key] = list()
##            elif token == "=":
##                data[key] = list()
##            else:
##                append_data(data, key, new_data, token)
##                new_data = True
##
##        last_token = token
##        token = lex.get_token()
##
##        if last_token == '\\' and token != '\n':
##            new_data = False
##            append_data(data, key, new_data, '\\')
##
##    # compress lists of len 1 into single strings
##    for (k, v) in data.items():
##        if len(v) == 0:
##            data.pop(k)
##
##        # items in the following list will be kept as lists and not converted to strings
##        if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
##            continue
##
##        if len(v) == 1:
##            data[k] = v[0]
##
##    return data
##
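## A worked example, were this parser re-enabled: a Doxyfile containing
##     PROJECT_NAME = jsoncpp
##     INPUT = include src
## would parse to {'PROJECT_NAME': 'jsoncpp', 'INPUT': ['include', 'src']};
## single-value keys are compressed to strings, while INPUT stays a list.
##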
##def DoxySourceScan(node, env, path):
##    """
##    Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
##    any files used to generate docs to the list of source files.
##    """
##    default_file_patterns = [
##        '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
##        '*.ipp', '*.i++', '*.inl', '*.h', '*.hh', '*.hxx', '*.hpp', '*.h++',
##        '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
##        '*.py',
##    ]
##
##    default_exclude_patterns = [
##        '*~',
##    ]
##
##    sources = []
##
##    data = DoxyfileParse(node.get_contents())
##
##    if data.get("RECURSIVE", "NO") == "YES":
##        recursive = True
##    else:
##        recursive = False
##
##    file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
##    exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
##
##    for node in data.get("INPUT", []):
##        if os.path.isfile(node):
##            sources.append(node)
##        elif os.path.isdir(node):
##            if recursive:
##                for root, dirs, files in os.walk(node):
##                    for f in files:
##                        filename = os.path.join(root, f)
##
##                        pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
##                        exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
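##                        # (pattern_check ORs the file patterns, so any match
##                        # includes the file; the exclude reduce ANDs, so a
##                        # file is excluded only if it matches every exclude
##                        # pattern.)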
##
##                        if pattern_check and not exclude_check:
##                            sources.append(filename)
##            else:
##                for pattern in file_patterns:
##                    sources.extend(glob.glob("/".join([node, pattern])))
##    sources = map(lambda path: env.File(path), sources)
##    return sources
##
##
##def DoxySourceScanCheck(node, env):
##    """Check if we should scan this file"""
##    return os.path.isfile(node.path)

def srcDistEmitter(source, target, env):
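    """Pass-through emitter: returns target and source unchanged (the
    Doxygen-specific emitter below is kept only as commented-out reference)."""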
##    """Doxygen Doxyfile emitter"""
##    # possible output formats and their default values and output locations
##    output_formats = {
##        "HTML": ("YES", "html"),
##        "LATEX": ("YES", "latex"),
##        "RTF": ("NO", "rtf"),
##        "MAN": ("YES", "man"),
##        "XML": ("NO", "xml"),
##    }
##
##    data = DoxyfileParse(source[0].get_contents())
##
##    targets = []
##    out_dir = data.get("OUTPUT_DIRECTORY", ".")
##
##    # add our output locations
##    for (k, v) in output_formats.items():
##        if data.get("GENERATE_" + k, v[0]) == "YES":
##            targets.append(env.Dir(os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))))
##
##    # don't clobber targets
##    for node in targets:
##        env.Precious(node)
##
##    # set up cleaning stuff
##    for node in targets:
##        env.Clean(node, node)
##
##    return (targets, source)
    return (target, source)

def generate(env):
    """
    Add builders and construction variables for the
    SrcDist tool.
    """
##    doxyfile_scanner = env.Scanner(DoxySourceScan,
##                                   "DoxySourceScan",
##                                   scan_check = DoxySourceScanCheck,
##                                   )

    if targz.exists(env):
        srcdist_builder = targz.makeBuilder(srcDistEmitter)

        env['BUILDERS']['SrcDist'] = srcdist_builder

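# A minimal usage sketch (hypothetical names, assuming this module sits on
# the SCons toolpath and targz.makeBuilder yields a standard
# target-then-source builder):
#
#   env = Environment(tools=['default', 'srcdist'], toolpath=['scons-tools'])
#   env.SrcDist('dist/jsoncpp-src.tar.gz', ['AUTHORS', 'SConstruct', 'src'])
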
def exists(env):
    """
    Make sure srcdist exists.
    """
    return targz.exists(env)
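
# Note: per the SCons tool convention, a tool module exposes generate(env) to
# attach its builders and exists(env) to report whether the tool is usable;
# both functions here defer to the companion targz tool.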