First version of PythonCharts.
It is deliberately simple: I wanted to get something into the project that people can use to easily compare different test runs. More functionality will come later.
tools/python_charts/src/gviz_api.py is a copy of the Google visualization Python API available from http://google-visualization-python.googlecode.com/svn/trunk/
Review URL: http://webrtc-codereview.appspot.com/257003
git-svn-id: http://webrtc.googlecode.com/svn/trunk@893 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/tools/python_charts/README b/tools/python_charts/README
new file mode 100644
index 0000000..483c402
--- /dev/null
+++ b/tools/python_charts/README
@@ -0,0 +1,41 @@
+This file describes how to set up Eclipse and then the Python Charts project.
+
+Setup Eclipse
+-------------
+These instructions were tested on Linux, but are very similar for Windows and
+Mac.
+1. Ensure you have Python 2.x installed
+2. Download and install Google App Engine SDK for Python from
+ http://code.google.com/appengine/downloads.html
+3. Note which location you put App Engine in, as this will be needed later on.
+4. Download Eclipse from http://www.eclipse.org. Any distribution will probably
+ do, but if you're going to do mainly web development, you might pick Eclipse
+ IDE for JavaScript Web Developers
+5. Install the PyDev plugin using the Eclipse update site mentioned at
+ http://pydev.org/download.html
+6. Install the Google Plugin for Eclipse: http://code.google.com/eclipse/
+
+Setup the project
+-----------------
+Generic instructions are available at
+http://code.google.com/appengine/docs/python/gettingstarted/ but the following
+should be enough:
+1. Launch Eclipse and create a workspace
+2. Create a new PyDev Project
+3. In the PyDev Project wizard, uncheck the "Use Default" checkbox for Project
+ contents and browse to your tools/python_charts directory.
+4. Enter a project name. We'll assume PythonCharts in the examples below.
+5. In the radio button of the lower part of the window, select
+ "Add project directory to the PYTHONPATH"
+6. Click Finish
+7. Select the Run > Run Configurations… menu item
+8. Create a new "Python Run" configuration
+9. Select your Python Charts project as project
+10. As Main Module, enter the path to your dev_appserver.py, which is a part
+ of your App Engine installation,
+ e.g. /usr/local/google_appengine/dev_appserver.py
+11. At the Arguments tab, enter the location of your project root. Using
+    Eclipse variables, if your project name is PythonCharts, this is:
+    ${workspace_loc:PythonCharts}
+12. Launch the development app server by clicking the Run button.
+13. Launch a browser and go to http://localhost:8080
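+
+Running without Eclipse
+-----------------------
+The development server can also be started directly from a shell, from the
+root of the source tree (the App Engine path below is an example; use the
+location you noted in step 3 above):
+  python /usr/local/google_appengine/dev_appserver.py tools/python_charts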
diff --git a/tools/python_charts/app.yaml b/tools/python_charts/app.yaml
new file mode 100644
index 0000000..ace1b51
--- /dev/null
+++ b/tools/python_charts/app.yaml
@@ -0,0 +1,9 @@
+application: webrtc-python-charts
+version: 1
+runtime: python
+api_version: 1
+
+handlers:
+
+- url: /.*
+ script: webrtc/main.py
\ No newline at end of file
diff --git a/tools/python_charts/data/vp8_hw.py b/tools/python_charts/data/vp8_hw.py
new file mode 100644
index 0000000..48c5770
--- /dev/null
+++ b/tools/python_charts/data/vp8_hw.py
@@ -0,0 +1,49 @@
+# Sample output from the video_quality_measurement program, included only for
+# reference. Generate your own by running with the --python flag and then
+# change the filenames in main.py.
+test_configuration = [{'name': 'name', 'value': 'Quality test'},
+{'name': 'description', 'value': ''},
+{'name': 'test_number', 'value': '0'},
+{'name': 'input_filename', 'value': 'foreman_cif.yuv'},
+{'name': 'output_filename', 'value': 'foreman_cif_out.yuv'},
+{'name': 'output_dir', 'value': '.'},
+{'name': 'packet_size_in_bytes', 'value': '1500'},
+{'name': 'max_payload_size_in_bytes', 'value': '1440'},
+{'name': 'packet_loss_mode', 'value': 'Uniform'},
+{'name': 'packet_loss_probability', 'value': '0.000000'},
+{'name': 'packet_loss_burst_length', 'value': '1'},
+{'name': 'exclude_frame_types', 'value': 'ExcludeOnlyFirstKeyFrame'},
+{'name': 'frame_length_in_bytes', 'value': '152064'},
+{'name': 'use_single_core', 'value': 'False'},
+{'name': 'keyframe_interval', 'value': '0'},
+{'name': 'video_codec_type', 'value': 'VP8'},
+{'name': 'width', 'value': '352'},
+{'name': 'height', 'value': '288'},
+{'name': 'bit_rate_in_kbps', 'value': '500'},
+]
+frame_data_types = {'frame_number': ('number', 'Frame number'),
+'encoding_successful': ('boolean', 'Encoding successful?'),
+'decoding_successful': ('boolean', 'Decoding successful?'),
+'encode_time': ('number', 'Encode time (us)'),
+'decode_time': ('number', 'Decode time (us)'),
+'encode_return_code': ('number', 'Encode return code'),
+'decode_return_code': ('number', 'Decode return code'),
+'bit_rate': ('number', 'Bit rate (kbps)'),
+'encoded_frame_length': ('number', 'Encoded frame length (bytes)'),
+'frame_type': ('string', 'Frame type'),
+'packets_dropped': ('number', 'Packets dropped'),
+'total_packets': ('number', 'Total packets'),
+'ssim': ('number', 'SSIM'),
+'psnr': ('number', 'PSNR (dB)'),
+}
+frame_data = [{'frame_number': 0, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 94676, 'decode_time': 37942, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 1098, 'encoded_frame_length': 4579, 'frame_type': 'Other', 'packets_dropped': 0, 'total_packets': 4, 'ssim': 0.910364, 'psnr': 35.067258},
+{'frame_number': 1, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 244007, 'decode_time': 39421, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 306, 'encoded_frame_length': 1277, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.911859, 'psnr': 35.115193},
+{'frame_number': 2, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 240508, 'decode_time': 38918, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 330, 'encoded_frame_length': 1379, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.913597, 'psnr': 35.181604},
+{'frame_number': 3, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 243449, 'decode_time': 39664, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 298, 'encoded_frame_length': 1242, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.912378, 'psnr': 35.164710},
+{'frame_number': 4, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 248024, 'decode_time': 39115, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 332, 'encoded_frame_length': 1385, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.911471, 'psnr': 35.109488},
+{'frame_number': 5, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 246910, 'decode_time': 39146, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 416, 'encoded_frame_length': 1734, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.915231, 'psnr': 35.392300},
+{'frame_number': 6, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 242953, 'decode_time': 38827, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 279, 'encoded_frame_length': 1165, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.916130, 'psnr': 35.452889},
+{'frame_number': 7, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 247343, 'decode_time': 41429, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 393, 'encoded_frame_length': 1639, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.919356, 'psnr': 35.647128},
+{'frame_number': 8, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 249529, 'decode_time': 40329, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 487, 'encoded_frame_length': 2033, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.924705, 'psnr': 36.179837},
+{'frame_number': 9, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 249408, 'decode_time': 41716, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 583, 'encoded_frame_length': 2433, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.928433, 'psnr': 36.589875},
+]
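+
+# Consumption sketch (illustrative only): the three variables above match the
+# formats gviz_api expects, so code like main.py can do e.g.:
+#   import gviz_api
+#   table = gviz_api.DataTable(frame_data_types, frame_data)
+#   print table.ToJSon(order_by='frame_number')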
diff --git a/tools/python_charts/data/vp8_sw.py b/tools/python_charts/data/vp8_sw.py
new file mode 100644
index 0000000..1cece43
--- /dev/null
+++ b/tools/python_charts/data/vp8_sw.py
@@ -0,0 +1,49 @@
+# Sample output from the video_quality_measurement program, included only for
+# reference. Generate your own by running with the --python flag and then
+# change the filenames in main.py.
+test_configuration = [{'name': 'name', 'value': 'Quality test'},
+{'name': 'description', 'value': ''},
+{'name': 'test_number', 'value': '0'},
+{'name': 'input_filename', 'value': 'foreman_cif.yuv'},
+{'name': 'output_filename', 'value': 'foreman_cif_out.yuv'},
+{'name': 'output_dir', 'value': '.'},
+{'name': 'packet_size_in_bytes', 'value': '1500'},
+{'name': 'max_payload_size_in_bytes', 'value': '1440'},
+{'name': 'packet_loss_mode', 'value': 'Uniform'},
+{'name': 'packet_loss_probability', 'value': '0.000000'},
+{'name': 'packet_loss_burst_length', 'value': '1'},
+{'name': 'exclude_frame_types', 'value': 'ExcludeOnlyFirstKeyFrame'},
+{'name': 'frame_length_in_bytes', 'value': '152064'},
+{'name': 'use_single_core', 'value': 'False'},
+{'name': 'keyframe_interval', 'value': '0'},
+{'name': 'video_codec_type', 'value': 'VP8'},
+{'name': 'width', 'value': '352'},
+{'name': 'height', 'value': '288'},
+{'name': 'bit_rate_in_kbps', 'value': '500'},
+]
+frame_data_types = {'frame_number': ('number', 'Frame number'),
+'encoding_successful': ('boolean', 'Encoding successful?'),
+'decoding_successful': ('boolean', 'Decoding successful?'),
+'encode_time': ('number', 'Encode time (us)'),
+'decode_time': ('number', 'Decode time (us)'),
+'encode_return_code': ('number', 'Encode return code'),
+'decode_return_code': ('number', 'Decode return code'),
+'bit_rate': ('number', 'Bit rate (kbps)'),
+'encoded_frame_length': ('number', 'Encoded frame length (bytes)'),
+'frame_type': ('string', 'Frame type'),
+'packets_dropped': ('number', 'Packets dropped'),
+'total_packets': ('number', 'Total packets'),
+'ssim': ('number', 'SSIM'),
+'psnr': ('number', 'PSNR (dB)'),
+}
+frame_data = [{'frame_number': 0, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 12427, 'decode_time': 4403, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 2270, 'encoded_frame_length': 9459, 'frame_type': 'Other', 'packets_dropped': 0, 'total_packets': 7, 'ssim': 0.947050, 'psnr': 38.332820},
+{'frame_number': 1, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 3292, 'decode_time': 821, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 88, 'encoded_frame_length': 368, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.927272, 'psnr': 35.883510},
+{'frame_number': 2, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4295, 'decode_time': 902, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 130, 'encoded_frame_length': 544, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.920539, 'psnr': 35.457107},
+{'frame_number': 3, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 3880, 'decode_time': 767, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 171, 'encoded_frame_length': 714, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.917434, 'psnr': 35.389298},
+{'frame_number': 4, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4471, 'decode_time': 909, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 248, 'encoded_frame_length': 1035, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.918892, 'psnr': 35.570229},
+{'frame_number': 5, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4447, 'decode_time': 976, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 269, 'encoded_frame_length': 1123, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.920609, 'psnr': 35.769663},
+{'frame_number': 6, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4432, 'decode_time': 891, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 271, 'encoded_frame_length': 1132, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 1, 'ssim': 0.922672, 'psnr': 35.913519},
+{'frame_number': 7, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 5026, 'decode_time': 1068, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 366, 'encoded_frame_length': 1529, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.925505, 'psnr': 36.246713},
+{'frame_number': 8, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4877, 'decode_time': 1051, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 369, 'encoded_frame_length': 1538, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.926122, 'psnr': 36.305984},
+{'frame_number': 9, 'encoding_successful': True , 'decoding_successful': True , 'encode_time': 4712, 'decode_time': 1087, 'encode_return_code': 0, 'decode_return_code': 0, 'bit_rate': 406, 'encoded_frame_length': 1692, 'frame_type': 'Delta', 'packets_dropped': 0, 'total_packets': 2, 'ssim': 0.927183, 'psnr': 36.379735},
+]
diff --git a/tools/python_charts/gviz_api.py b/tools/python_charts/gviz_api.py
new file mode 100755
index 0000000..8d07d20
--- /dev/null
+++ b/tools/python_charts/gviz_api.py
@@ -0,0 +1,1048 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+"""Converts Python data into data for Google Visualization API clients.
+
+This library can be used to create a google.visualization.DataTable usable by
+visualizations built on the Google Visualization API. Output formats are raw
+JSON, JSON response, and JavaScript.
+
+See http://code.google.com/apis/visualization/ for documentation on the
+Google Visualization API.
+"""
+
+__author__ = "Amit Weinstein, Misha Seltzer"
+
+import cgi
+import datetime
+import types
+
+
+class DataTableException(Exception):
+ """The general exception object thrown by DataTable."""
+ pass
+
+
+class DataTable(object):
+ """Wraps the data to convert to a Google Visualization API DataTable.
+
+ Create this object, populate it with data, then call one of the ToJS...
+ methods to return a string representation of the data in the format described.
+
+ You can clear all data from the object to reuse it, but you cannot clear
+ individual cells, rows, or columns. You also cannot modify the table schema
+ specified in the class constructor.
+
+ You can add new data one or more rows at a time. All data added to an
+ instantiated DataTable must conform to the schema passed in to __init__().
+
+ You can reorder the columns in the output table, and also specify row sorting
+ order by column. The default column order is according to the original
+ table_description parameter. Default row sort order is ascending, by column
+ 1 values. For a dictionary, we sort the keys for order.
+
+ The data and the table_description are closely tied, as described here:
+
+ The table schema is defined in the class constructor's table_description
+ parameter. The user defines each column using a tuple of
+ (id[, type[, label[, custom_properties]]]). The default value for type is
+ string, label is the same as ID if not specified, and custom properties is
+ an empty dictionary if not specified.
+
+ table_description is a dictionary or list, containing one or more column
+ descriptor tuples, nested dictionaries, and lists. Each dictionary key, list
+ element, or dictionary element must eventually be defined as
+ a column description tuple. Here's an example of a dictionary where the key
+ is a tuple, and the value is a list of two tuples:
+ {('a', 'number'): [('b', 'number'), ('c', 'string')]}
+
+ This flexibility in data entry enables you to build and manipulate your data
+ in a Python structure that makes sense for your program.
+
+ Add data to the table using the same nested design as the table's
+ table_description, replacing column descriptor tuples with cell data, and
+ each row is an element in the top level collection. This will be a bit
+ clearer after you look at the following examples showing the
+ table_description, matching data, and the resulting table:
+
+ Columns as list of tuples [col1, col2, col3]
+ table_description: [('a', 'number'), ('b', 'string')]
+ AppendData( [[1, 'z'], [2, 'w'], [4, 'o'], [5, 'k']] )
+ Table:
+ a b <--- these are column ids/labels
+ 1 z
+ 2 w
+ 4 o
+ 5 k
+
+ Dictionary of columns, where key is a column, and value is a list of
+ columns {col1: [col2, col3]}
+ table_description: {('a', 'number'): [('b', 'number'), ('c', 'string')]}
+  AppendData( {1: [2, 'z'], 3: [4, 'w']} )
+ Table:
+ a b c
+ 1 2 z
+ 3 4 w
+
+ Dictionary where key is a column, and the value is itself a dictionary of
+ columns {col1: {col2, col3}}
+ table_description: {('a', 'number'): {'b': 'number', 'c': 'string'}}
+  AppendData( {1: {'b': 2, 'c': 'z'}, 3: {'b': 4, 'c': 'w'}} )
+ Table:
+ a b c
+ 1 2 z
+ 3 4 w
+ """
+
+ def __init__(self, table_description, data=None, custom_properties=None):
+ """Initialize the data table from a table schema and (optionally) data.
+
+ See the class documentation for more information on table schema and data
+ values.
+
+ Args:
+ table_description: A table schema, following one of the formats described
+ in TableDescriptionParser(). Schemas describe the
+ column names, data types, and labels. See
+ TableDescriptionParser() for acceptable formats.
+ data: Optional. If given, fills the table with the given data. The data
+ structure must be consistent with schema in table_description. See
+ the class documentation for more information on acceptable data. You
+ can add data later by calling AppendData().
+ custom_properties: Optional. A dictionary from string to string that
+ goes into the table's custom properties. This can be
+ later changed by changing self.custom_properties.
+
+ Raises:
+ DataTableException: Raised if the data and the description did not match,
+ or did not use the supported formats.
+ """
+ self.__columns = self.TableDescriptionParser(table_description)
+ self.__data = []
+ self.custom_properties = {}
+ if custom_properties is not None:
+ self.custom_properties = custom_properties
+ if data:
+ self.LoadData(data)
+
+ @staticmethod
+ def _EscapeValueForCsv(v):
+ """Escapes the value for use in a CSV file.
+
+ Puts the string in double-quotes, and escapes any inner double-quotes by
+ doubling them.
+
+ Args:
+ v: The value to escape.
+
+ Returns:
+      The escaped value.
+ """
+ return '"%s"' % v.replace('"', '""')
+
+ @staticmethod
+ def _EscapeValue(v):
+ """Puts the string in quotes, and escapes any inner quotes and slashes."""
+ if isinstance(v, unicode):
+ # Here we use repr as in the usual case, but on unicode strings, it
+ # also escapes the unicode characters (which we want to leave as is).
+ # So, after repr() we decode using raw-unicode-escape, which decodes
+ # only the unicode characters, and leaves all the rest (", ', \n and
+ # more) escaped.
+ # We don't take the first character, because repr adds a u in the
+ # beginning of the string (usual repr output for unicode is u'...').
+ return repr(v).decode("raw-unicode-escape")[1:]
+ # Here we use python built-in escaping mechanism for string using repr.
+ return repr(str(v))
+
+ @staticmethod
+ def _EscapeCustomProperties(custom_properties):
+ """Escapes the custom properties dictionary."""
+ l = []
+ for key, value in custom_properties.iteritems():
+ l.append("%s:%s" % (DataTable._EscapeValue(key),
+ DataTable._EscapeValue(value)))
+ return "{%s}" % ",".join(l)
+
+ @staticmethod
+ def SingleValueToJS(value, value_type, escape_func=None):
+ """Translates a single value and type into a JS value.
+
+ Internal helper method.
+
+ Args:
+ value: The value which should be converted
+ value_type: One of "string", "number", "boolean", "date", "datetime" or
+ "timeofday".
+ escape_func: The function to use for escaping strings.
+
+ Returns:
+ The proper JS format (as string) of the given value according to the
+ given value_type. For None, we simply return "null".
+ If a tuple is given, it should be in one of the following forms:
+ - (value, formatted value)
+ - (value, formatted value, custom properties)
+ where the formatted value is a string, and custom properties is a
+ dictionary of the custom properties for this cell.
+ To specify custom properties without specifying formatted value, one can
+ pass None as the formatted value.
+ One can also have a null-valued cell with formatted value and/or custom
+ properties by specifying None for the value.
+    This method ignores the custom properties except for checking that they
+    form a dictionary. The custom properties are handled in the ToJSon and
+    ToJSCode methods.
+ The real type of the given value is not strictly checked. For example,
+ any type can be used for string - as we simply take its str( ) and for
+ boolean value we just check "if value".
+ Examples:
+ SingleValueToJS(None, "boolean") returns "null"
+ SingleValueToJS(False, "boolean") returns "false"
+ SingleValueToJS((5, "5$"), "number") returns ("5", "'5$'")
+ SingleValueToJS((None, "5$"), "number") returns ("null", "'5$'")
+
+ Raises:
+      DataTableException: The value and type did not match in a non-recoverable
+ way, for example given value 'abc' for type 'number'.
+ """
+ if escape_func is None:
+ escape_func = DataTable._EscapeValue
+ if isinstance(value, tuple):
+ # In case of a tuple, we run the same function on the value itself and
+ # add the formatted value.
+ if (len(value) not in [2, 3] or
+ (len(value) == 3 and not isinstance(value[2], dict))):
+ raise DataTableException("Wrong format for value and formatting - %s." %
+ str(value))
+ if not isinstance(value[1], types.StringTypes + (types.NoneType,)):
+ raise DataTableException("Formatted value is not string, given %s." %
+ type(value[1]))
+ js_value = DataTable.SingleValueToJS(value[0], value_type)
+ if value[1] is None:
+ return (js_value, None)
+ return (js_value, escape_func(value[1]))
+
+ # The standard case - no formatting.
+ t_value = type(value)
+ if value is None:
+ return "null"
+ if value_type == "boolean":
+ if value:
+ return "true"
+ return "false"
+
+ elif value_type == "number":
+ if isinstance(value, (int, long, float)):
+ return str(value)
+ raise DataTableException("Wrong type %s when expected number" % t_value)
+
+ elif value_type == "string":
+ if isinstance(value, tuple):
+ raise DataTableException("Tuple is not allowed as string value.")
+ return escape_func(value)
+
+ elif value_type == "date":
+ if not isinstance(value, (datetime.date, datetime.datetime)):
+ raise DataTableException("Wrong type %s when expected date" % t_value)
+ # We need to shift the month by 1 to match JS Date format
+ return "new Date(%d,%d,%d)" % (value.year, value.month - 1, value.day)
+
+ elif value_type == "timeofday":
+ if not isinstance(value, (datetime.time, datetime.datetime)):
+ raise DataTableException("Wrong type %s when expected time" % t_value)
+ return "[%d,%d,%d]" % (value.hour, value.minute, value.second)
+
+ elif value_type == "datetime":
+ if not isinstance(value, datetime.datetime):
+ raise DataTableException("Wrong type %s when expected datetime" %
+ t_value)
+ return "new Date(%d,%d,%d,%d,%d,%d)" % (value.year,
+ value.month - 1, # To match JS
+ value.day,
+ value.hour,
+ value.minute,
+ value.second)
+ # If we got here, it means the given value_type was not one of the
+ # supported types.
+ raise DataTableException("Unsupported type %s" % value_type)
+
+ @staticmethod
+ def ColumnTypeParser(description):
+ """Parses a single column description. Internal helper method.
+
+ Args:
+ description: a column description in the possible formats:
+ 'id'
+ ('id',)
+ ('id', 'type')
+ ('id', 'type', 'label')
+ ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
+ Returns:
+ Dictionary with the following keys: id, label, type, and
+ custom_properties where:
+ - If label not given, it equals the id.
+ - If type not given, string is used by default.
+ - If custom properties are not given, an empty dictionary is used by
+ default.
+
+ Raises:
+      DataTableException: The column description did not match any of the
+          supported formats, or an unsupported type was passed.
+ """
+ if not description:
+ raise DataTableException("Description error: empty description given")
+
+ if not isinstance(description, (types.StringTypes, tuple)):
+ raise DataTableException("Description error: expected either string or "
+ "tuple, got %s." % type(description))
+
+ if isinstance(description, types.StringTypes):
+ description = (description,)
+
+ # According to the tuple's length, we fill the keys
+ # We verify everything is of type string
+ for elem in description[:3]:
+ if not isinstance(elem, types.StringTypes):
+ raise DataTableException("Description error: expected tuple of "
+ "strings, current element of type %s." %
+ type(elem))
+ desc_dict = {"id": description[0],
+ "label": description[0],
+ "type": "string",
+ "custom_properties": {}}
+ if len(description) > 1:
+ desc_dict["type"] = description[1].lower()
+ if len(description) > 2:
+ desc_dict["label"] = description[2]
+ if len(description) > 3:
+ if not isinstance(description[3], dict):
+ raise DataTableException("Description error: expected custom "
+ "properties of type dict, current element "
+ "of type %s." % type(description[3]))
+ desc_dict["custom_properties"] = description[3]
+ if len(description) > 4:
+ raise DataTableException("Description error: tuple of length > 4")
+ if desc_dict["type"] not in ["string", "number", "boolean",
+ "date", "datetime", "timeofday"]:
+ raise DataTableException(
+ "Description error: unsupported type '%s'" % desc_dict["type"])
+ return desc_dict
+
+ @staticmethod
+ def TableDescriptionParser(table_description, depth=0):
+ """Parses the table_description object for internal use.
+
+ Parses the user-submitted table description into an internal format used
+ by the Python DataTable class. Returns the flat list of parsed columns.
+
+ Args:
+ table_description: A description of the table which should comply
+ with one of the formats described below.
+ depth: Optional. The depth of the first level in the current description.
+ Used by recursive calls to this function.
+
+ Returns:
+ List of columns, where each column represented by a dictionary with the
+      keys: id, label, type, depth, container, custom_properties, which mean
+      the following:
+      - id: the id of the column
+      - label: the label of the column
+ - type: The datatype of the elements in this column. Allowed types are
+ described in ColumnTypeParser().
+ - depth: The depth of this column in the table description
+ - container: 'dict', 'iter' or 'scalar' for parsing the format easily.
+ - custom_properties: The custom properties for this column.
+ The returned description is flattened regardless of how it was given.
+
+ Raises:
+ DataTableException: Error in a column description or in the description
+ structure.
+
+ Examples:
+ A column description can be of the following forms:
+ 'id'
+ ('id',)
+ ('id', 'type')
+ ('id', 'type', 'label')
+ ('id', 'type', 'label', {'custom_prop1': 'custom_val1'})
+ or as a dictionary:
+ 'id': 'type'
+ 'id': ('type',)
+ 'id': ('type', 'label')
+ 'id': ('type', 'label', {'custom_prop1': 'custom_val1'})
+ If the type is not specified, we treat it as string.
+ If no specific label is given, the label is simply the id.
+ If no custom properties are given, we use an empty dictionary.
+
+ input: [('a', 'date'), ('b', 'timeofday', 'b', {'foo': 'bar'})]
+ output: [{'id': 'a', 'label': 'a', 'type': 'date',
+ 'depth': 0, 'container': 'iter', 'custom_properties': {}},
+ {'id': 'b', 'label': 'b', 'type': 'timeofday',
+ 'depth': 0, 'container': 'iter',
+ 'custom_properties': {'foo': 'bar'}}]
+
+ input: {'a': [('b', 'number'), ('c', 'string', 'column c')]}
+ output: [{'id': 'a', 'label': 'a', 'type': 'string',
+ 'depth': 0, 'container': 'dict', 'custom_properties': {}},
+ {'id': 'b', 'label': 'b', 'type': 'number',
+ 'depth': 1, 'container': 'iter', 'custom_properties': {}},
+ {'id': 'c', 'label': 'column c', 'type': 'string',
+ 'depth': 1, 'container': 'iter', 'custom_properties': {}}]
+
+ input: {('a', 'number', 'column a'): { 'b': 'number', 'c': 'string'}}
+ output: [{'id': 'a', 'label': 'column a', 'type': 'number',
+ 'depth': 0, 'container': 'dict', 'custom_properties': {}},
+ {'id': 'b', 'label': 'b', 'type': 'number',
+ 'depth': 1, 'container': 'dict', 'custom_properties': {}},
+ {'id': 'c', 'label': 'c', 'type': 'string',
+ 'depth': 1, 'container': 'dict', 'custom_properties': {}}]
+
+ input: { ('w', 'string', 'word'): ('c', 'number', 'count') }
+ output: [{'id': 'w', 'label': 'word', 'type': 'string',
+ 'depth': 0, 'container': 'dict', 'custom_properties': {}},
+ {'id': 'c', 'label': 'count', 'type': 'number',
+ 'depth': 1, 'container': 'scalar', 'custom_properties': {}}]
+
+ input: {'a': ('number', 'column a'), 'b': ('string', 'column b')}
+ output: [{'id': 'a', 'label': 'column a', 'type': 'number', 'depth': 0,
+ 'container': 'dict', 'custom_properties': {}},
+ {'id': 'b', 'label': 'column b', 'type': 'string', 'depth': 0,
+ 'container': 'dict', 'custom_properties': {}}
+
+ NOTE: there might be ambiguity in the case of a dictionary representation
+ of a single column. For example, the following description can be parsed
+    in 2 different ways: {'a': ('b', 'c')} can be thought of as a single column
+ with the id 'a', of type 'b' and the label 'c', or as 2 columns: one named
+ 'a', and the other named 'b' of type 'c'. We choose the first option by
+ default, and in case the second option is the right one, it is possible to
+ make the key into a tuple (i.e. {('a',): ('b', 'c')}) or add more info
+ into the tuple, thus making it look like this: {'a': ('b', 'c', 'b', {})}
+ -- second 'b' is the label, and {} is the custom properties field.
+ """
+ # For the recursion step, we check for a scalar object (string or tuple)
+ if isinstance(table_description, (types.StringTypes, tuple)):
+ parsed_col = DataTable.ColumnTypeParser(table_description)
+ parsed_col["depth"] = depth
+ parsed_col["container"] = "scalar"
+ return [parsed_col]
+
+ # Since it is not scalar, table_description must be iterable.
+ if not hasattr(table_description, "__iter__"):
+ raise DataTableException("Expected an iterable object, got %s" %
+ type(table_description))
+ if not isinstance(table_description, dict):
+      # We expect a non-dictionary iterable item.
+ columns = []
+ for desc in table_description:
+ parsed_col = DataTable.ColumnTypeParser(desc)
+ parsed_col["depth"] = depth
+ parsed_col["container"] = "iter"
+ columns.append(parsed_col)
+ if not columns:
+ raise DataTableException("Description iterable objects should not"
+ " be empty.")
+ return columns
+ # The other case is a dictionary
+ if not table_description:
+ raise DataTableException("Empty dictionaries are not allowed inside"
+ " description")
+
+    # To differentiate between the two cases (more levels below, or this is
+    # the innermost dictionary) we consider the number of keys (more than
+    # one key indicates the innermost dictionary) and, in the case of a
+    # single key, the types of the key and value: if the key is a string and
+    # the value is a tuple of 0-3 items, we assume this is the innermost
+    # dictionary.
+    # NOTE: this way of differentiating might create ambiguity. See docs.
+ if (len(table_description) != 1 or
+ (isinstance(table_description.keys()[0], types.StringTypes) and
+ isinstance(table_description.values()[0], tuple) and
+ len(table_description.values()[0]) < 4)):
+ # This is the most inner dictionary. Parsing types.
+ columns = []
+      # We sort the items, equivalent to sorting the keys since they are unique.
+ for key, value in sorted(table_description.items()):
+ # We parse the column type as (key, type) or (key, type, label) using
+ # ColumnTypeParser.
+ if isinstance(value, tuple):
+ parsed_col = DataTable.ColumnTypeParser((key,) + value)
+ else:
+ parsed_col = DataTable.ColumnTypeParser((key, value))
+ parsed_col["depth"] = depth
+ parsed_col["container"] = "dict"
+ columns.append(parsed_col)
+ return columns
+    # This is an outer dictionary; at this point it has exactly one key.
+ parsed_col = DataTable.ColumnTypeParser(table_description.keys()[0])
+ parsed_col["depth"] = depth
+ parsed_col["container"] = "dict"
+ return ([parsed_col] +
+ DataTable.TableDescriptionParser(table_description.values()[0],
+ depth=depth + 1))
+
+ @property
+ def columns(self):
+ """Returns the parsed table description."""
+ return self.__columns
+
+ def NumberOfRows(self):
+ """Returns the number of rows in the current data stored in the table."""
+ return len(self.__data)
+
+ def SetRowsCustomProperties(self, rows, custom_properties):
+ """Sets the custom properties for given row(s).
+
+ Can accept a single row or an iterable of rows.
+ Sets the given custom properties for all specified rows.
+
+ Args:
+ rows: The row, or rows, to set the custom properties for.
+ custom_properties: A string to string dictionary of custom properties to
+ set for all rows.
+ """
+ if not hasattr(rows, "__iter__"):
+ rows = [rows]
+ for row in rows:
+ self.__data[row] = (self.__data[row][0], custom_properties)
+
+ def LoadData(self, data, custom_properties=None):
+ """Loads new rows to the data table, clearing existing rows.
+
+    May also set the custom_properties for the added rows. The given custom
+    properties dictionary is used for *all* given rows.
+
+ Args:
+ data: The rows that the table will contain.
+ custom_properties: A dictionary of string to string to set as the custom
+ properties for all rows.
+ """
+ self.__data = []
+ self.AppendData(data, custom_properties)
+
+ def AppendData(self, data, custom_properties=None):
+ """Appends new data to the table.
+
+ Data is appended in rows. Data must comply with
+ the table schema passed in to __init__(). See SingleValueToJS() for a list
+ of acceptable data types. See the class documentation for more information
+ and examples of schema and data values.
+
+ Args:
+ data: The row to add to the table. The data must conform to the table
+ description format.
+ custom_properties: A dictionary of string to string, representing the
+ custom properties to add to all the rows.
+
+ Raises:
+ DataTableException: The data structure does not match the description.
+ """
+    # If the maximal depth is 0, we iterate over the data table
+    # lines and insert them using _InnerAppendData. Otherwise, we
+    # let _InnerAppendData handle all the levels.
+ if not self.__columns[-1]["depth"]:
+ for row in data:
+ self._InnerAppendData(({}, custom_properties), row, 0)
+ else:
+ self._InnerAppendData(({}, custom_properties), data, 0)
+
+ def _InnerAppendData(self, prev_col_values, data, col_index):
+ """Inner function to assist LoadData."""
+ # We first check that col_index has not exceeded the columns size
+ if col_index >= len(self.__columns):
+ raise DataTableException("The data does not match description, too deep")
+
+ # Dealing with the scalar case, the data is the last value.
+ if self.__columns[col_index]["container"] == "scalar":
+ prev_col_values[0][self.__columns[col_index]["id"]] = data
+ self.__data.append(prev_col_values)
+ return
+
+ if self.__columns[col_index]["container"] == "iter":
+ if not hasattr(data, "__iter__") or isinstance(data, dict):
+ raise DataTableException("Expected iterable object, got %s" %
+ type(data))
+ # We only need to insert the rest of the columns
+      # If there are fewer items than expected, we only add what there is.
+ for value in data:
+ if col_index >= len(self.__columns):
+ raise DataTableException("Too many elements given in data")
+ prev_col_values[0][self.__columns[col_index]["id"]] = value
+ col_index += 1
+ self.__data.append(prev_col_values)
+ return
+
+ # We know the current level is a dictionary, we verify the type.
+ if not isinstance(data, dict):
+ raise DataTableException("Expected dictionary at current level, got %s" %
+ type(data))
+ # We check if this is the last level
+ if self.__columns[col_index]["depth"] == self.__columns[-1]["depth"]:
+ # We need to add the keys in the dictionary as they are
+ for col in self.__columns[col_index:]:
+ if col["id"] in data:
+ prev_col_values[0][col["id"]] = data[col["id"]]
+ self.__data.append(prev_col_values)
+ return
+
+ # We have a dictionary in an inner depth level.
+ if not data.keys():
+ # In case this is an empty dictionary, we add a record with the columns
+ # filled only until this point.
+ self.__data.append(prev_col_values)
+ else:
+ for key in sorted(data):
+ col_values = dict(prev_col_values[0])
+ col_values[self.__columns[col_index]["id"]] = key
+ self._InnerAppendData((col_values, prev_col_values[1]),
+ data[key], col_index + 1)
+
+ def _PreparedData(self, order_by=()):
+ """Prepares the data for enumeration - sorting it by order_by.
+
+ Args:
+ order_by: Optional. Specifies the name of the column(s) to sort by, and
+ (optionally) which direction to sort in. Default sort direction
+ is asc. Following formats are accepted:
+ "string_col_name" -- For a single key in default (asc) order.
+ ("string_col_name", "asc|desc") -- For a single key.
+ [("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
+ one column, an array of tuples of (col_name, "asc|desc").
+
+ Returns:
+ The data sorted by the keys given.
+
+ Raises:
+ DataTableException: Sort direction not in 'asc' or 'desc'
+ """
+ if not order_by:
+ return self.__data
+
+ proper_sort_keys = []
+ if isinstance(order_by, types.StringTypes) or (
+ isinstance(order_by, tuple) and len(order_by) == 2 and
+ order_by[1].lower() in ["asc", "desc"]):
+ order_by = (order_by,)
+ for key in order_by:
+ if isinstance(key, types.StringTypes):
+ proper_sort_keys.append((key, 1))
+ elif (isinstance(key, (list, tuple)) and len(key) == 2 and
+ key[1].lower() in ("asc", "desc")):
+ proper_sort_keys.append((key[0], key[1].lower() == "asc" and 1 or -1))
+ else:
+ raise DataTableException("Expected tuple with second value: "
+ "'asc' or 'desc'")
+
+ def SortCmpFunc(row1, row2):
+ """cmp function for sorted. Compares by keys and 'asc'/'desc' keywords."""
+ for key, asc_mult in proper_sort_keys:
+ cmp_result = asc_mult * cmp(row1[0].get(key), row2[0].get(key))
+ if cmp_result:
+ return cmp_result
+ return 0
+
+ return sorted(self.__data, cmp=SortCmpFunc)
+
+ def ToJSCode(self, name, columns_order=None, order_by=()):
+ """Writes the data table as a JS code string.
+
+ This method writes a string of JS code that can be run to
+ generate a DataTable with the specified data. Typically used for debugging
+ only.
+
+ Args:
+ name: The name of the table. The name would be used as the DataTable's
+ variable name in the created JS code.
+ columns_order: Optional. Specifies the order of columns in the
+ output table. Specify a list of all column IDs in the order
+ in which you want the table created.
+ Note that you must list all column IDs in this parameter,
+ if you use it.
+ order_by: Optional. Specifies the name of the column(s) to sort by.
+ Passed as is to _PreparedData.
+
+ Returns:
+ A string of JS code that, when run, generates a DataTable with the given
+ name and the data stored in the DataTable object.
+ Example result:
+ "var tab1 = new google.visualization.DataTable();
+ tab1.addColumn('string', 'a', 'a');
+ tab1.addColumn('number', 'b', 'b');
+ tab1.addColumn('boolean', 'c', 'c');
+ tab1.addRows(10);
+ tab1.setCell(0, 0, 'a');
+ tab1.setCell(0, 1, 1, null, {'foo': 'bar'});
+ tab1.setCell(0, 2, true);
+ ...
+ tab1.setCell(9, 0, 'c');
+ tab1.setCell(9, 1, 3, '3$');
+ tab1.setCell(9, 2, false);"
+
+ Raises:
+ DataTableException: The data does not match the type.
+ """
+ if columns_order is None:
+ columns_order = [col["id"] for col in self.__columns]
+ col_dict = dict([(col["id"], col) for col in self.__columns])
+
+ # We first create the table with the given name
+ jscode = "var %s = new google.visualization.DataTable();\n" % name
+ if self.custom_properties:
+ jscode += "%s.setTableProperties(%s);\n" % (
+ name, DataTable._EscapeCustomProperties(self.custom_properties))
+
+ # We add the columns to the table
+ for i, col in enumerate(columns_order):
+ jscode += "%s.addColumn('%s', %s, %s);\n" % (
+ name,
+ col_dict[col]["type"],
+ DataTable._EscapeValue(col_dict[col]["label"]),
+ DataTable._EscapeValue(col_dict[col]["id"]))
+ if col_dict[col]["custom_properties"]:
+ jscode += "%s.setColumnProperties(%d, %s);\n" % (
+ name, i, DataTable._EscapeCustomProperties(
+ col_dict[col]["custom_properties"]))
+ jscode += "%s.addRows(%d);\n" % (name, len(self.__data))
+
+ # We now go over the data and add each row
+ for (i, (row, cp)) in enumerate(self._PreparedData(order_by)):
+ # We add all the elements of this row by their order
+ for (j, col) in enumerate(columns_order):
+ if col not in row or row[col] is None:
+ continue
+ cell_cp = ""
+ if isinstance(row[col], tuple) and len(row[col]) == 3:
+ cell_cp = ", %s" % DataTable._EscapeCustomProperties(row[col][2])
+ value = self.SingleValueToJS(row[col], col_dict[col]["type"])
+ if isinstance(value, tuple):
+ # We have a formatted value or custom property as well
+ if value[1] is None:
+ value = (value[0], "null")
+ jscode += ("%s.setCell(%d, %d, %s, %s%s);\n" %
+ (name, i, j, value[0], value[1], cell_cp))
+ else:
+ jscode += "%s.setCell(%d, %d, %s);\n" % (name, i, j, value)
+ if cp:
+ jscode += "%s.setRowProperties(%d, %s);\n" % (
+ name, i, DataTable._EscapeCustomProperties(cp))
+ return jscode
+
+ def ToHtml(self, columns_order=None, order_by=()):
+ """Writes the data table as an HTML table code string.
+
+ Args:
+ columns_order: Optional. Specifies the order of columns in the
+ output table. Specify a list of all column IDs in the order
+ in which you want the table created.
+ Note that you must list all column IDs in this parameter,
+ if you use it.
+ order_by: Optional. Specifies the name of the column(s) to sort by.
+ Passed as is to _PreparedData.
+
+ Returns:
+ An HTML table code string.
+ Example result (the result is without the newlines):
+ <html><body><table border='1'>
+ <thead><tr><th>a</th><th>b</th><th>c</th></tr></thead>
+ <tbody>
+ <tr><td>1</td><td>"z"</td><td>2</td></tr>
+ <tr><td>"3$"</td><td>"w"</td><td></td></tr>
+ </tbody>
+ </table></body></html>
+
+ Raises:
+ DataTableException: The data does not match the type.
+ """
+ table_template = "<html><body><table border='1'>%s</table></body></html>"
+ columns_template = "<thead><tr>%s</tr></thead>"
+ rows_template = "<tbody>%s</tbody>"
+ row_template = "<tr>%s</tr>"
+ header_cell_template = "<th>%s</th>"
+ cell_template = "<td>%s</td>"
+
+ if columns_order is None:
+ columns_order = [col["id"] for col in self.__columns]
+ col_dict = dict([(col["id"], col) for col in self.__columns])
+
+ columns_list = []
+ for col in columns_order:
+ columns_list.append(header_cell_template %
+ cgi.escape(col_dict[col]["label"]))
+ columns_html = columns_template % "".join(columns_list)
+
+ rows_list = []
+ # We now go over the data and add each row
+ for row, unused_cp in self._PreparedData(order_by):
+ cells_list = []
+ # We add all the elements of this row by their order
+ for col in columns_order:
+ # For empty string we want empty quotes ("").
+ value = ""
+ if col in row and row[col] is not None:
+ value = self.SingleValueToJS(row[col], col_dict[col]["type"])
+ if isinstance(value, tuple):
+ # We have a formatted value and we're going to use it
+ cells_list.append(cell_template % cgi.escape(value[1]))
+ else:
+ cells_list.append(cell_template % cgi.escape(value))
+ rows_list.append(row_template % "".join(cells_list))
+ rows_html = rows_template % "".join(rows_list)
+
+ return table_template % (columns_html + rows_html)
+
+ def ToCsv(self, columns_order=None, order_by=(), separator=", "):
+ """Writes the data table as a CSV string.
+
+ Args:
+ columns_order: Optional. Specifies the order of columns in the
+ output table. Specify a list of all column IDs in the order
+ in which you want the table created.
+ Note that you must list all column IDs in this parameter,
+ if you use it.
+ order_by: Optional. Specifies the name of the column(s) to sort by.
+ Passed as is to _PreparedData.
+ separator: Optional. The separator to use between the values.
+
+ Returns:
+ A CSV string representing the table.
+ Example result:
+ 'a', 'b', 'c'
+ 1, 'z', 2
+ 3, 'w', ''
+
+ Raises:
+ DataTableException: The data does not match the type.
+ """
+ if columns_order is None:
+ columns_order = [col["id"] for col in self.__columns]
+ col_dict = dict([(col["id"], col) for col in self.__columns])
+
+ columns_list = []
+ for col in columns_order:
+ columns_list.append(DataTable._EscapeValueForCsv(col_dict[col]["label"]))
+ columns_line = separator.join(columns_list)
+
+ rows_list = []
+ # We now go over the data and add each row
+ for row, unused_cp in self._PreparedData(order_by):
+ cells_list = []
+ # We add all the elements of this row by their order
+ for col in columns_order:
+ value = '""'
+ if col in row and row[col] is not None:
+ value = self.SingleValueToJS(row[col], col_dict[col]["type"],
+ DataTable._EscapeValueForCsv)
+ if isinstance(value, tuple):
+ # We have a formatted value. Using it only for date/time types.
+ if col_dict[col]["type"] in ["date", "datetime", "timeofday"]:
+ cells_list.append(value[1])
+ else:
+ cells_list.append(value[0])
+ else:
+ # We need to quote date types, because they contain commas.
+ if (col_dict[col]["type"] in ["date", "datetime", "timeofday"] and
+ value != '""'):
+ value = '"%s"' % value
+ cells_list.append(value)
+ rows_list.append(separator.join(cells_list))
+ rows = "\n".join(rows_list)
+
+ return "%s\n%s" % (columns_line, rows)
+
+ def ToTsvExcel(self, columns_order=None, order_by=()):
+ """Returns a file in tab-separated-format readable by MS Excel.
+
+ Returns a file in UTF-16 little endian encoding, with tabs separating the
+ values.
+
+ Args:
+ columns_order: Delegated to ToCsv.
+ order_by: Delegated to ToCsv.
+
+ Returns:
+ A tab-separated little endian UTF16 file representing the table.
+ """
+ return self.ToCsv(
+ columns_order, order_by, separator="\t").encode("UTF-16LE")
+
+ def ToJSon(self, columns_order=None, order_by=()):
+ """Writes a JSON string that can be used in a JS DataTable constructor.
+
+ This method writes a JSON string that can be passed directly into a Google
+ Visualization API DataTable constructor. Use this output if you are
+ hosting the visualization HTML on your site, and want to code the data
+ table in Python. Pass this string into the
+    google.visualization.DataTable constructor, e.g.:
+ ... on my page that hosts my visualization ...
+ google.setOnLoadCallback(drawTable);
+ function drawTable() {
+ var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
+ myTable.draw(data);
+ }
+
+ Args:
+ columns_order: Optional. Specifies the order of columns in the
+ output table. Specify a list of all column IDs in the order
+ in which you want the table created.
+ Note that you must list all column IDs in this parameter,
+ if you use it.
+ order_by: Optional. Specifies the name of the column(s) to sort by.
+ Passed as is to _PreparedData().
+
+ Returns:
+      A JSON constructor string to generate a JS DataTable with the data
+ stored in the DataTable object.
+ Example result (the result is without the newlines):
+ {cols: [{id:'a',label:'a',type:'number'},
+ {id:'b',label:'b',type:'string'},
+ {id:'c',label:'c',type:'number'}],
+       rows: [{c:[{v:1},{v:'z'},{v:2}]}, {c:[{v:3,f:'3$'},{v:'w'},{v:null}]}],
+ p: {'foo': 'bar'}}
+
+ Raises:
+ DataTableException: The data does not match the type.
+ """
+ if columns_order is None:
+ columns_order = [col["id"] for col in self.__columns]
+ col_dict = dict([(col["id"], col) for col in self.__columns])
+
+ # Creating the columns jsons
+ cols_jsons = []
+ for col_id in columns_order:
+ d = dict(col_dict[col_id])
+ d["id"] = DataTable._EscapeValue(d["id"])
+ d["label"] = DataTable._EscapeValue(d["label"])
+ d["cp"] = ""
+ if col_dict[col_id]["custom_properties"]:
+ d["cp"] = ",p:%s" % DataTable._EscapeCustomProperties(
+ col_dict[col_id]["custom_properties"])
+ cols_jsons.append(
+ "{id:%(id)s,label:%(label)s,type:'%(type)s'%(cp)s}" % d)
+
+ # Creating the rows jsons
+ rows_jsons = []
+ for row, cp in self._PreparedData(order_by):
+ cells_jsons = []
+ for col in columns_order:
+        # We omit the {v:null} for a None value in any column except the last
+ value = row.get(col, None)
+ if value is None and col != columns_order[-1]:
+ cells_jsons.append("")
+ else:
+ value = self.SingleValueToJS(value, col_dict[col]["type"])
+ if isinstance(value, tuple):
+ # We have a formatted value or custom property as well
+ if len(row.get(col)) == 3:
+ if value[1] is None:
+ cells_jsons.append("{v:%s,p:%s}" % (
+ value[0],
+ DataTable._EscapeCustomProperties(row.get(col)[2])))
+ else:
+ cells_jsons.append("{v:%s,f:%s,p:%s}" % (value + (
+ DataTable._EscapeCustomProperties(row.get(col)[2]),)))
+ else:
+ cells_jsons.append("{v:%s,f:%s}" % value)
+ else:
+ cells_jsons.append("{v:%s}" % value)
+ if cp:
+ rows_jsons.append("{c:[%s],p:%s}" % (
+ ",".join(cells_jsons), DataTable._EscapeCustomProperties(cp)))
+ else:
+ rows_jsons.append("{c:[%s]}" % ",".join(cells_jsons))
+
+ general_custom_properties = ""
+ if self.custom_properties:
+ general_custom_properties = (
+ ",p:%s" % DataTable._EscapeCustomProperties(self.custom_properties))
+
+ # We now join the columns jsons and the rows jsons
+ json = "{cols:[%s],rows:[%s]%s}" % (",".join(cols_jsons),
+ ",".join(rows_jsons),
+ general_custom_properties)
+ return json
+
+ def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
+ response_handler="google.visualization.Query.setResponse"):
+ """Writes a table as a JSON response that can be returned as-is to a client.
+
+ This method writes a JSON response to return to a client in response to a
+ Google Visualization API query. This string can be processed by the calling
+ page, and is used to deliver a data table to a visualization hosted on
+ a different page.
+
+ Args:
+ columns_order: Optional. Passed straight to self.ToJSon().
+ order_by: Optional. Passed straight to self.ToJSon().
+ req_id: Optional. The response id, as retrieved by the request.
+ response_handler: Optional. The response handler, as retrieved by the
+ request.
+
+ Returns:
+      A JSON response string to be received by the JS visualization Query
+ object. This response would be translated into a DataTable on the
+ client side.
+ Example result (newlines added for readability):
+ google.visualization.Query.setResponse({
+ 'version':'0.6', 'reqId':'0', 'status':'OK',
+ 'table': {cols: [...], rows: [...]}});
+
+ Note: The URL returning this string can be used as a data source by Google
+ Visualization Gadgets or from JS code.
+ """
+ table = self.ToJSon(columns_order, order_by)
+ return ("%s({'version':'0.6', 'reqId':'%s', 'status':'OK', "
+ "'table': %s});") % (response_handler, req_id, table)
+
+ def ToResponse(self, columns_order=None, order_by=(), tqx=""):
+ """Writes the right response according to the request string passed in tqx.
+
+ This method parses the tqx request string (format of which is defined in
+ the documentation for implementing a data source of Google Visualization),
+ and returns the right response according to the request.
+ It parses out the "out" parameter of tqx, calls the relevant response
+ (ToJSonResponse() for "json", ToCsv() for "csv", ToHtml() for "html",
+ ToTsvExcel() for "tsv-excel") and passes the response function the rest of
+ the relevant request keys.
+
+ Args:
+ columns_order: Optional. Passed as is to the relevant response function.
+ order_by: Optional. Passed as is to the relevant response function.
+ tqx: Optional. The request string as received by HTTP GET. Should be in
+ the format "key1:value1;key2:value2...". All keys have a default
+ value, so an empty string will just do the default (which is calling
+ ToJSonResponse() with no extra parameters).
+
+ Returns:
+ A response string, as returned by the relevant response function.
+
+ Raises:
+ DataTableException: One of the parameters passed in tqx is not supported.
+ """
+ tqx_dict = {}
+ if tqx:
+ tqx_dict = dict(opt.split(":") for opt in tqx.split(";"))
+ if tqx_dict.get("version", "0.6") != "0.6":
+ raise DataTableException(
+ "Version (%s) passed by request is not supported."
+ % tqx_dict["version"])
+
+ if tqx_dict.get("out", "json") == "json":
+ response_handler = tqx_dict.get("responseHandler",
+ "google.visualization.Query.setResponse")
+ return self.ToJSonResponse(columns_order, order_by,
+ req_id=tqx_dict.get("reqId", 0),
+ response_handler=response_handler)
+ elif tqx_dict["out"] == "html":
+ return self.ToHtml(columns_order, order_by)
+ elif tqx_dict["out"] == "csv":
+ return self.ToCsv(columns_order, order_by)
+ elif tqx_dict["out"] == "tsv-excel":
+ return self.ToTsvExcel(columns_order, order_by)
+ else:
+ raise DataTableException(
+ "'out' parameter: '%s' is not supported" % tqx_dict["out"])
diff --git a/tools/python_charts/templates/chart_page_template.html b/tools/python_charts/templates/chart_page_template.html
new file mode 100644
index 0000000..f241fff
--- /dev/null
+++ b/tools/python_charts/templates/chart_page_template.html
@@ -0,0 +1,80 @@
+<html>
+ <!--
+ Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+
+ Use of this source code is governed by a BSD-style license
+ that can be found in the LICENSE file in the root of the source
+ tree. An additional intellectual property rights grant can be found
+ in the file PATENTS. All contributing project authors may
+ be found in the AUTHORS file in the root of the source tree.
+
+ Template file to be used to generate Charts for Video Quality Metrics.
+ -->
+ <head>
+ <link href="http://code.google.com/css/codesite.pack.04102009.css"
+ rel="stylesheet" type="text/css" />
+ </head>
+ <script src="https://www.google.com/jsapi" type="text/javascript"></script>
+ <script>
+ google.load('visualization', '1', {packages:['table', 'corechart']});
+
+ google.setOnLoadCallback(drawTable);
+ function drawTable() {
+ /* Build data table and views */
+ var ssim_data_table =
+ new google.visualization.DataTable(%(json_ssim_data)s);
+ var psnr_data_table =
+ new google.visualization.DataTable(%(json_psnr_data)s);
+ var packet_loss_data_table =
+ new google.visualization.DataTable(%(json_packet_loss_data)s);
+ var bit_rate_data_table =
+ new google.visualization.DataTable(%(json_bit_rate_data)s);
+
+ /* Display tables and charts */
+ var ssim_chart = new google.visualization.LineChart(
+ document.getElementById('table_div_ssim'));
+ ssim_chart.draw(ssim_data_table, {
+ colors: ['blue', 'orange'],
+ vAxis: {title: 'SSIM'},
+ hAxis: {title: 'Frame'},
+ width: 1200, height: 300,
+ });
+
+ var psnr_chart = new google.visualization.LineChart(
+ document.getElementById('table_div_psnr'));
+ psnr_chart.draw(psnr_data_table, {
+ colors: ['blue', 'orange'],
+ vAxis: {title: 'PSNR(dB)'},
+ hAxis: {title: 'Frame'},
+ width: 1200, height: 300,
+ });
+
+ var packet_loss_chart = new google.visualization.LineChart(
+ document.getElementById('table_div_packet_loss'));
+ packet_loss_chart.draw(packet_loss_data_table, {
+ colors: ['blue', 'orange'],
+ vAxis: {title: 'Packets dropped'},
+ hAxis: {title: 'Frame'},
+ width: 1200, height: 300,
+ });
+
+ var bit_rate_chart = new google.visualization.LineChart(
+ document.getElementById('table_div_bit_rate'));
+ bit_rate_chart.draw(bit_rate_data_table, {
+ colors: ['blue', 'orange', 'red'],
+ vAxis: {title: 'Bit rate'},
+ hAxis: {title: 'Frame'},
+ width: 1200, height: 300,
+ });
+ }
+ </script>
+ <body>
+ <h3>Messages:</h3>
+ <pre>%(messages)s</pre>
+ <h3>Metrics measured per frame:</h3>
+ <div id="table_div_ssim"></div>
+ <div id="table_div_psnr"></div>
+ <div id="table_div_packet_loss"></div>
+ <div id="table_div_bit_rate"></div>
+ </body>
+</html>
\ No newline at end of file
diff --git a/tools/python_charts/webrtc/__init__.py b/tools/python_charts/webrtc/__init__.py
new file mode 100644
index 0000000..c1caaa2
--- /dev/null
+++ b/tools/python_charts/webrtc/__init__.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
\ No newline at end of file
diff --git a/tools/python_charts/webrtc/data_helper.py b/tools/python_charts/webrtc/data_helper.py
new file mode 100644
index 0000000..17daf7d
--- /dev/null
+++ b/tools/python_charts/webrtc/data_helper.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+__author__ = 'kjellander@webrtc.org (Henrik Kjellander)'
+
+class DataHelper(object):
+ """
+ Helper class for managing table data.
+ This class does not verify the consistency of the data tables sent into it.
+ """
+
+ def __init__(self, data_list, table_description, names_list, messages):
+ """ Initializes the DataHelper with data.
+
+ Args:
+ data_list: List of one or more data lists in the format that the
+ Google Visualization Python API expects (list of dictionaries, one
+ per row of data). See the gviz_api.DataTable documentation for more
+ info.
+ table_description: dictionary describing the data types of all
+ columns in the data lists, as defined in the gviz_api.DataTable
+ documentation.
+      names_list: List of strings to name the data columns after, typically
+        one name per run of data collection.
+ messages: List of strings we might append error messages to.
+ """
+ self.data_list = data_list
+ self.table_description = table_description
+ self.names_list = names_list
+ self.messages = messages
+ self.number_of_datasets = len(data_list)
+ self.number_of_frames = len(data_list[0])
+
+ def CreateData(self, field_name, start_frame=0, end_frame=0):
+ """ Creates a data structure for a specified data field.
+
+ Creates a data structure (data type description dictionary and a list
+ of data dictionaries) to be used with the Google Visualization Python
+ API. The frame_number column is always present and one column per data
+ set is added and its field name is suffixed by _N where N is the number
+ of the data set (0, 1, 2...)
+
+ Args:
+ field_name: String name of the field, must be present in the data
+ structure this DataHelper was created with.
+ start_frame: Frame number to start at (zero indexed). Default: 0.
+      end_frame: Frame number of the last frame to include. If zero, all
+        frames will be included. Default: 0.
+
+ Returns:
+ A tuple containing:
+      - a dictionary describing the columns in the result_data_table below.
+ This description uses the name for each data set specified by
+ names_list.
+
+ Example with two data sets named 'Foreman' and 'Crew':
+ {
+          'frame_number': ('string', 'Frame number'),
+ 'ssim_0': ('number', 'Foreman'),
+ 'ssim_1': ('number', 'Crew'),
+ }
+ - a list containing dictionaries (one per row) with the frame_number
+ column and one column of the specified field_name column per data
+ set.
+
+ Example with two data sets named 'Foreman' and 'Crew':
+ [
+ {'frame_number': 0, 'ssim_0': 0.98, 'ssim_1': 0.77 },
+ {'frame_number': 1, 'ssim_0': 0.81, 'ssim_1': 0.53 },
+ ]
+ """
+
+ # Build dictionary that describes the data types
+ result_table_description = {'frame_number': ('string', 'Frame number')}
+ for dataset_index in range(self.number_of_datasets):
+ column_name = '%s_%s' % (field_name, dataset_index)
+ column_type = self.table_description[field_name][0]
+ column_description = self.names_list[dataset_index]
+ result_table_description[column_name] = (column_type, column_description)
+
+ # Build data table of all the data
+ result_data_table = []
+ # We're going to have one dictionary per row.
+ # Create that and copy frame_number values from the first data set
+ for source_row in self.data_list[0]:
+ row_dict = { 'frame_number': source_row['frame_number'] }
+ result_data_table.append(row_dict)
+
+    # Pick target field data points from all the data tables.
+ if end_frame == 0: # Default to all frames
+ end_frame = self.number_of_frames
+
+ for dataset_index in range(self.number_of_datasets):
+ for row_number in range(start_frame, end_frame):
+ column_name = '%s_%s' % (field_name, dataset_index)
+ # Stop if any of the data sets are missing the frame
+ try:
+ result_data_table[row_number][column_name] = \
+ self.data_list[dataset_index][row_number][field_name]
+ except IndexError:
+ self.messages.append("Couldn't find frame data for row %d "
+ "for %s" % (row_number, self.names_list[dataset_index]))
+ break
+ return (result_table_description, result_data_table)
+
+ def GetOrdering(self, table_description):
+ """ Creates a list of column names, ordered alphabetically except for the
+ frame_number column which always will be the first column.
+
+ Args:
+ table_description: A dictionary of column definitions as defined by the
+ gviz_api.DataTable documentation.
+ Returns:
+ A list of column names, where frame_number is the first and the
+ remaining columns are sorted alphabetically.
+ """
+ # The JSON data representation generated from gviz_api.DataTable.ToJSon()
+ # must have frame_number as its first column in order for the chart to
+    # use it as its X-axis value series.
+ # gviz_api.DataTable orders the columns by name by default, which will
+ # be incorrect if we have column names that are sorted before frame_number
+ # in our data table.
+ columns_ordering = ['frame_number']
+    # Add all the other columns.
+ for column in sorted(table_description.keys()):
+ if column != 'frame_number':
+ columns_ordering.append(column)
+ return columns_ordering
\ No newline at end of file
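
Typical use of DataHelper together with gviz_api, mirroring what main.py
further down does (all data values invented; frame numbers are passed as
strings since that is what main.py converts them to):

  import gviz_api
  from webrtc.data_helper import DataHelper

  data_0 = [{'frame_number': '0', 'ssim': 0.98}]
  data_1 = [{'frame_number': '0', 'ssim': 0.77}]
  types = {'frame_number': ('string', 'Frame number'),
           'ssim': ('number', 'SSIM')}
  messages = []
  helper = DataHelper([data_0, data_1], types, ['Foreman', 'Crew'], messages)
  description, data = helper.CreateData('ssim')
  # description now contains 'frame_number', plus 'ssim_0' labeled 'Foreman'
  # and 'ssim_1' labeled 'Crew'; data holds one merged dictionary per frame.
  json_data = gviz_api.DataTable(description, data).ToJSon(
      helper.GetOrdering(description))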
diff --git a/tools/python_charts/webrtc/data_helper_test.py b/tools/python_charts/webrtc/data_helper_test.py
new file mode 100644
index 0000000..9aa020e
--- /dev/null
+++ b/tools/python_charts/webrtc/data_helper_test.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+__author__ = 'kjellander@webrtc.org (Henrik Kjellander)'
+
+import unittest
+import webrtc.data_helper
+
+class Test(unittest.TestCase):
+
+ def setUp(self):
+ # Simulate frame data from two different test runs, with 2 frames each.
+ self.frame_data_0 = [{'frame_number': 0, 'ssim': 0.5, 'psnr': 30.5},
+ {'frame_number': 1, 'ssim': 0.55, 'psnr': 30.55}]
+ self.frame_data_1 = [{'frame_number': 0, 'ssim': 0.6, 'psnr': 30.6},
+                         {'frame_number': 1, 'ssim': 0.66, 'psnr': 30.66}]
+ self.all_data = [ self.frame_data_0, self.frame_data_1 ]
+
+    # Test with frame_number column in a non-first position since we need to
+ # support reordering that to be able to use the gviz_api as we want.
+ self.type_description = {
+ 'ssim': ('number', 'SSIM'),
+ 'frame_number': ('number', 'Frame number'),
+      'psnr': ('number', 'PSNR'),
+ }
+ self.names = ["Test 0", "Test 1"]
+
+ def testCreateData(self):
+ messages = []
+ helper = webrtc.data_helper.DataHelper(self.all_data, self.type_description,
+ self.names, messages)
+ description, data_table = helper.CreateData('ssim')
+ self.assertEqual(3, len(description))
+ self.assertTrue('frame_number' in description)
+ self.assertTrue('ssim_0' in description)
+ self.assertTrue('number' in description['ssim_0'][0])
+ self.assertTrue('Test 0' in description['ssim_0'][1])
+ self.assertTrue('ssim_1' in description)
+ self.assertTrue('number' in description['ssim_1'][0])
+ self.assertTrue('Test 1' in description['ssim_1'][1])
+
+ self.assertEqual(0, len(messages))
+
+    self.assertEqual(2, len(data_table))
+    row = data_table[0]
+    self.assertEqual(0, row['frame_number'])
+    self.assertEqual(0.5, row['ssim_0'])
+    self.assertEqual(0.6, row['ssim_1'])
+    row = data_table[1]
+    self.assertEqual(1, row['frame_number'])
+    self.assertEqual(0.55, row['ssim_0'])
+    self.assertEqual(0.66, row['ssim_1'])
+
+ description, data_table = helper.CreateData('psnr')
+ self.assertEqual(3, len(description))
+ self.assertTrue('frame_number' in description)
+ self.assertTrue('psnr_0' in description)
+ self.assertTrue('psnr_1' in description)
+ self.assertEqual(0, len(messages))
+
+    self.assertEqual(2, len(data_table))
+    row = data_table[0]
+    self.assertEqual(0, row['frame_number'])
+    self.assertEqual(30.5, row['psnr_0'])
+    self.assertEqual(30.6, row['psnr_1'])
+    row = data_table[1]
+    self.assertEqual(1, row['frame_number'])
+    self.assertEqual(30.55, row['psnr_0'])
+    self.assertEqual(30.66, row['psnr_1'])
+
+ def testGetOrdering(self):
+ """ Tests that the ordering help method returns a list with frame_number
+ first and the rest sorted alphabetically """
+ messages = []
+ helper = webrtc.data_helper.DataHelper(self.all_data, self.type_description,
+ self.names, messages)
+ description, data_table = helper.CreateData('ssim')
+ columns = helper.GetOrdering(description)
+ self.assertEqual(3, len(columns))
+ self.assertEqual(0, len(messages))
+ self.assertEqual('frame_number', columns[0])
+ self.assertEqual('ssim_0', columns[1])
+ self.assertEqual('ssim_1', columns[2])
+
+if __name__ == "__main__":
+ unittest.main()
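
Since the tests import the webrtc package, they presumably have to be run with
tools/python_charts as the current directory (an assumption about the intended
layout), e.g. with 'python -m webrtc.data_helper_test', or programmatically:

  import unittest

  # Load and run the suite by module name; assumes the 'webrtc' package is
  # importable from the current directory.
  suite = unittest.TestLoader().loadTestsFromName('webrtc.data_helper_test')
  unittest.TextTestRunner(verbosity=2).run(suite)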
diff --git a/tools/python_charts/webrtc/main.py b/tools/python_charts/webrtc/main.py
new file mode 100644
index 0000000..a06e960
--- /dev/null
+++ b/tools/python_charts/webrtc/main.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+__author__ = 'kjellander@webrtc.org (Henrik Kjellander)'
+
+import os
+import gviz_api
+import webrtc.data_helper
+
+def main():
+ """
+  This Python script displays a web page with test data created with the
+ video_quality_measurement program, which is a tool in WebRTC.
+
+  The script depends on two external files and one Python library:
+  - An HTML template file with layout and references to the JSON variables
+ defined in this script
+ - A data file in Python format, containing the following:
+ - test_configuration - a dictionary of test configuration names and values.
+ - frame_data_types - a dictionary that maps the different metrics to their
+      data types.
+ - frame_data - a list of dictionaries where each dictionary maps a metric to
+      its value.
+ - The gviz_api.py of the Google Visualization Python API, available at
+ http://code.google.com/p/google-visualization-python/
+
+ The HTML file is shipped with the script, while the data file must be
+ generated by running video_quality_measurement with the --python flag
+ specified.
+ """
+ print 'Content-type: text/html\n' # the newline is required!
+
+ page_template_filename = '../templates/chart_page_template.html'
+  # The data files must be located in the project tree for App Engine to be
+  # able to access them.
+ data_filenames = [ '../data/vp8_sw.py', '../data/vp8_hw.py' ]
+ # Will contain info/error messages to be displayed on the resulting page.
+ messages = []
+ # Load the page HTML template.
+ try:
+ f = open(page_template_filename)
+ page_template = f.read()
+ f.close()
+ except IOError as e:
+ ShowErrorPage('Cannot open page template file: %s<br>Details: %s' %
+ (page_template_filename, e))
+ return
+
+  # Read data from external Python script files. First check that they exist.
+  # Iterate over a copy, since removing items from a list while iterating
+  # over it would skip elements.
+  for filename in data_filenames[:]:
+    if not os.path.exists(filename):
+      messages.append('Cannot open data file: %s' % filename)
+      data_filenames.remove(filename)
+
+ # Read data from all existing input files.
+ data_list = []
+ test_configurations_list = []
+ names = []
+
+ for filename in data_filenames:
+ read_vars = {} # empty dictionary to load the data into.
+ execfile(filename, read_vars, read_vars)
+
+ test_configuration = read_vars['test_configuration']
+ table_description = read_vars['frame_data_types']
+ table_data = read_vars['frame_data']
+
+ # Verify the data in the file loaded properly.
+ if not table_description or not table_data:
+      messages.append('Invalid input file: %s. Missing description list or '
+                      'data dictionary variables.' % filename)
+ continue
+
+    # Frame numbers appear as number type in the data, but the Chart API
+    # requires the X-axis values to be of string type.
+ # Change the frame_number column data type:
+ table_description['frame_number'] = ('string', 'Frame number')
+ # Convert all the values to string types:
+ for row in table_data:
+ row['frame_number'] = str(row['frame_number'])
+
+ # Store the unique data from this file in the high level lists.
+ test_configurations_list.append(test_configuration)
+ data_list.append(table_data)
+    # Use the file name, stripped of directory path and extension, as the name.
+ names.append(filename[filename.rfind('/')+1:filename.rfind('.')])
+
+ # Create data helper and build data tables for each graph.
+ helper = webrtc.data_helper.DataHelper(data_list, table_description,
+ names, messages)
+
+  # Load the data into gviz_api.DataTable objects and create JSON strings.
+ description, data = helper.CreateData('ssim')
+ ssim = gviz_api.DataTable(description, data)
+ json_ssim_data = ssim.ToJSon(helper.GetOrdering(description))
+
+ description, data = helper.CreateData('psnr')
+ psnr = gviz_api.DataTable(description, data)
+ json_psnr_data = psnr.ToJSon(helper.GetOrdering(description))
+
+ description, data = helper.CreateData('packets_dropped')
+ packet_loss = gviz_api.DataTable(description, data)
+ json_packet_loss_data = packet_loss.ToJSon(helper.GetOrdering(description))
+
+ description, data = helper.CreateData('bit_rate')
+ # Add a column of data points for the desired bit rate to be plotted.
+  # (This uses the test configuration from the last data set, assuming it is
+  # the same for all of them.)
+ desired_bit_rate = -1
+ for row in test_configuration:
+ if row['name'] == 'bit_rate_in_kbps':
+ desired_bit_rate = int(row['value'])
+ if desired_bit_rate == -1:
+ ShowErrorPage('Cannot find bit rate in the test configuration.')
+ return
+ # Add new column data type description.
+ description['desired_bit_rate'] = ('number', 'Desired bit rate (kbps)')
+ for row in data:
+ row['desired_bit_rate'] = desired_bit_rate
+ bit_rate = gviz_api.DataTable(description, data)
+ json_bit_rate_data = bit_rate.ToJSon(helper.GetOrdering(description))
+
+ # Format the messages list with newlines.
+ messages = '\n'.join(messages)
+
+  # Put the variables as JSON strings into the template.
+ print page_template % vars()
+
+def ShowErrorPage(error_message):
+ print '<html><body>%s</body></html>' % error_message
+
+if __name__ == '__main__':
+ main()
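
For reference, a minimal sketch of the data file main() expects from
video_quality_measurement --python (field names taken from the code above;
all values invented, since a real file is generated by the tool itself):

  # Hypothetical ../data/vp8_sw.py contents, read via execfile() in main().
  test_configuration = [{'name': 'name', 'value': 'My quality test'},
                        {'name': 'bit_rate_in_kbps', 'value': '500'}]
  frame_data_types = {'frame_number': ('number', 'Frame number'),
                      'ssim': ('number', 'SSIM'),
                      'psnr': ('number', 'PSNR'),
                      'packets_dropped': ('number', 'Packets dropped'),
                      'bit_rate': ('number', 'Bit rate (kbps)')}
  frame_data = [{'frame_number': 0, 'ssim': 0.98, 'psnr': 38.5,
                 'packets_dropped': 0, 'bit_rate': 473},
                {'frame_number': 1, 'ssim': 0.97, 'psnr': 38.1,
                 'packets_dropped': 0, 'bit_rate': 466}]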