Load /Users/solydzajs/Downloads/google_appengine into trunk/thirdparty/google_appengine.
author Pawel Solyga <Pawel.Solyga@gmail.com>
date Tue, 20 Jan 2009 13:19:45 +0000
changeset 828 f5fd65cc3bf3
parent 827 88c186556a80
child 829 595b34a71cbb
Load /Users/solydzajs/Downloads/google_appengine into trunk/thirdparty/google_appengine.
thirdparty/google_appengine/BUGS
thirdparty/google_appengine/RELEASE_NOTES
thirdparty/google_appengine/VERSION
thirdparty/google_appengine/appcfg.py
thirdparty/google_appengine/bulkload_client.py
thirdparty/google_appengine/dev_appserver.py
thirdparty/google_appengine/google/appengine/api/api_base_pb.py
thirdparty/google_appengine/google/appengine/api/apiproxy_rpc.py
thirdparty/google_appengine/google/appengine/api/apiproxy_stub.py
thirdparty/google_appengine/google/appengine/api/apiproxy_stub_map.py
thirdparty/google_appengine/google/appengine/api/appinfo.py
thirdparty/google_appengine/google/appengine/api/capabilities/__init__.py
thirdparty/google_appengine/google/appengine/api/capabilities/capability_service_pb.py
thirdparty/google_appengine/google/appengine/api/capabilities/capability_stub.py
thirdparty/google_appengine/google/appengine/api/croninfo.py
thirdparty/google_appengine/google/appengine/api/datastore.py
thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py
thirdparty/google_appengine/google/appengine/api/datastore_types.py
thirdparty/google_appengine/google/appengine/api/images/__init__.py
thirdparty/google_appengine/google/appengine/api/images/images_service_pb.py
thirdparty/google_appengine/google/appengine/api/mail_service_pb.py
thirdparty/google_appengine/google/appengine/api/memcache/memcache_service_pb.py
thirdparty/google_appengine/google/appengine/api/urlfetch_service_pb.py
thirdparty/google_appengine/google/appengine/api/urlfetch_stub.py
thirdparty/google_appengine/google/appengine/api/user_service_pb.py
thirdparty/google_appengine/google/appengine/api/validation.py
thirdparty/google_appengine/google/appengine/base/capabilities_pb.py
thirdparty/google_appengine/google/appengine/cron/GrocLexer.py
thirdparty/google_appengine/google/appengine/cron/GrocParser.py
thirdparty/google_appengine/google/appengine/cron/__init__.py
thirdparty/google_appengine/google/appengine/cron/groc.py
thirdparty/google_appengine/google/appengine/cron/groctimespecification.py
thirdparty/google_appengine/google/appengine/datastore/datastore_index.py
thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py
thirdparty/google_appengine/google/appengine/datastore/entity_pb.py
thirdparty/google_appengine/google/appengine/ext/admin/__init__.py
thirdparty/google_appengine/google/appengine/ext/admin/templates/datastore.html
thirdparty/google_appengine/google/appengine/ext/bulkload/__init__.py
thirdparty/google_appengine/google/appengine/ext/db/__init__.py
thirdparty/google_appengine/google/appengine/ext/db/djangoforms.py
thirdparty/google_appengine/google/appengine/ext/db/polymodel.py
thirdparty/google_appengine/google/appengine/ext/remote_api/__init__.py
thirdparty/google_appengine/google/appengine/ext/remote_api/handler.py
thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_pb.py
thirdparty/google_appengine/google/appengine/ext/webapp/__init__.py
thirdparty/google_appengine/google/appengine/runtime/apiproxy.py
thirdparty/google_appengine/google/appengine/tools/appcfg.py
thirdparty/google_appengine/google/appengine/tools/appengine_rpc.py
thirdparty/google_appengine/google/appengine/tools/dev_appserver.py
thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py
thirdparty/google_appengine/google/net/proto/ProtocolBuffer.py
thirdparty/google_appengine/google/net/proto/RawMessage.py
thirdparty/google_appengine/lib/antlr3/AUTHORS
thirdparty/google_appengine/lib/antlr3/LICENSE
thirdparty/google_appengine/lib/antlr3/MANIFEST.in
thirdparty/google_appengine/lib/antlr3/OWNERS
thirdparty/google_appengine/lib/antlr3/README
thirdparty/google_appengine/lib/antlr3/antlr3/__init__.py
thirdparty/google_appengine/lib/antlr3/antlr3/compat.py
thirdparty/google_appengine/lib/antlr3/antlr3/constants.py
thirdparty/google_appengine/lib/antlr3/antlr3/dfa.py
thirdparty/google_appengine/lib/antlr3/antlr3/dottreegen.py
thirdparty/google_appengine/lib/antlr3/antlr3/exceptions.py
thirdparty/google_appengine/lib/antlr3/antlr3/extras.py
thirdparty/google_appengine/lib/antlr3/antlr3/main.py
thirdparty/google_appengine/lib/antlr3/antlr3/recognizers.py
thirdparty/google_appengine/lib/antlr3/antlr3/streams.py
thirdparty/google_appengine/lib/antlr3/antlr3/tokens.py
thirdparty/google_appengine/lib/antlr3/antlr3/tree.py
thirdparty/google_appengine/lib/antlr3/antlr3/treewizard.py
thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/PKG-INFO
thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/SOURCES.txt
thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/dependency_links.txt
thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/top_level.txt
thirdparty/google_appengine/lib/antlr3/setup.py
thirdparty/google_appengine/tools/bulkload_client.py
--- a/thirdparty/google_appengine/BUGS	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/BUGS	Tue Jan 20 13:19:45 2009 +0000
@@ -1,3 +1,3 @@
 A list of bugs is available in the Google App Engine SDK project on Google Code.
 
-Please visit http://code.google.com/p/googleappengine for more details.
+The issue tracker is at http://code.google.com/p/googleappengine/issues/.
--- a/thirdparty/google_appengine/RELEASE_NOTES	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/RELEASE_NOTES	Tue Jan 20 13:19:45 2009 +0000
@@ -3,6 +3,32 @@
 
 App Engine SDK - Release Notes
 
+Version 1.1.8 - January 7, 2009
+=================================
+  - Skip_files RegexStr validator allows lists to be used for regex-ors.
+      http://code.google.com/p/googleappengine/issues/detail?id=81
+  - sys.path and sys.argv are no longer reset for each request.
+      http://code.google.com/p/googleappengine/issues/detail?id=772
+  - New ByteString data type for the datastore: an indexed, non-text short blob.
+  - UserProperty now takes auto_current_user and auto_current_user_add
+    attributes.
+  - Support for polymorphic models and queries.
+  - db.Model.order() now supports __key__.
+      http://code.google.com/p/googleappengine/issues/detail?id=884
+  - Urlfetch no longer sets content-length: 0 when there is no body.
+      http://code.google.com/p/googleappengine/issues/detail?id=817
+  - Get height and width of an image via the Images API.
+      http://code.google.com/p/googleappengine/issues/detail?id=435
+  - Limit auto-Bcc of email sender to the case where the email sender is the
+    currently-logged-in user.
+  - Adds a limit of 100 combined filters and sort orders per datastore query
+    to the SDK.
+  - Fix unicode support for the bulkloader.
+      http://code.google.com/p/googleappengine/issues/detail?id=157
+  - Moved bulkload_client.py from the appengine/tools directory to the
+    appengine/ directory.
+  - Modify webapp to use logging.exception instead of logging.error.
+  - Additional fixes to the SDK's sanitizing of response headers to match
+    production.
+      http://code.google.com/p/googleappengine/issues/detail?id=198
+
 Version 1.1.7 - November 20, 2008
 =================================
   - Fixed an issue with urlfetch response headers.
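A few of the 1.1.8 additions above, sketched together. This is illustrative only: it assumes the 1.1.8 SDK is on sys.path, and that db.ByteStringProperty is the model-level wrapper for the new ByteString type.

    from google.appengine.ext import db
    from google.appengine.ext.db import polymodel

    class Contact(polymodel.PolyModel):                    # polymorphic models
      owner = db.UserProperty(auto_current_user_add=True)  # new attribute
      raw_tag = db.ByteStringProperty()                    # indexed short blob

    # db.Model.order() now accepts the __key__ special property.
    q = Contact.all().order('__key__')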
--- a/thirdparty/google_appengine/VERSION	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/VERSION	Tue Jan 20 13:19:45 2009 +0000
@@ -1,3 +1,3 @@
-release: "1.1.7"
-timestamp: 1227225249
+release: "1.1.8"
+timestamp: 1231809440
 api_versions: ['1']
--- a/thirdparty/google_appengine/appcfg.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/appcfg.py	Tue Jan 20 13:19:45 2009 +0000
@@ -38,6 +38,7 @@
 
 EXTRA_PATHS = [
   DIR_PATH,
+  os.path.join(DIR_PATH, 'lib', 'antlr3'),
   os.path.join(DIR_PATH, 'lib', 'django'),
   os.path.join(DIR_PATH, 'lib', 'webob'),
   os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
@@ -47,9 +48,13 @@
   "dev_appserver.py" : "dev_appserver_main.py"
 }
 
-if __name__ == '__main__':
+def run_file(file_path, globals_):
+  """Execute the file at the specified path with the passed-in globals."""
   sys.path = EXTRA_PATHS + sys.path
-  script_name = os.path.basename(__file__)
+  script_name = os.path.basename(file_path)
   script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
   script_path = os.path.join(SCRIPT_DIR, script_name)
-  execfile(script_path, globals())
+  execfile(script_path, globals_)
+
+if __name__ == '__main__':
+  run_file(__file__, globals())
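The entry-point logic above is now an importable run_file helper instead of inline __main__ code, so the same wrapper body is shared by appcfg.py, dev_appserver.py and the new bulkload_client.py below. A rough illustration of how SCRIPT_EXCEPTIONS routes a wrapper to its real script (values taken from the tables above):

    SCRIPT_EXCEPTIONS = {'dev_appserver.py': 'dev_appserver_main.py'}
    # appcfg.py runs its namesake in google/appengine/tools/;
    # dev_appserver.py is redirected to dev_appserver_main.py.
    name = SCRIPT_EXCEPTIONS.get('dev_appserver.py', 'dev_appserver.py')
    assert name == 'dev_appserver_main.py'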
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/bulkload_client.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Convenience wrapper for starting an appengine tool."""
+
+
+import os
+import sys
+
+if not hasattr(sys, 'version_info'):
+  sys.stderr.write('Very old versions of Python are not supported. Please '
+                   'use version 2.5 or greater.\n')
+  sys.exit(1)
+version_tuple = tuple(sys.version_info[:2])
+if version_tuple < (2, 4):
+  sys.stderr.write('Error: Python %d.%d is not supported. Please use '
+                   'version 2.5 or greater.\n' % version_tuple)
+  sys.exit(1)
+if version_tuple == (2, 4):
+  sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
+                   'break. Please use version 2.5 or greater.\n')
+
+DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
+
+EXTRA_PATHS = [
+  DIR_PATH,
+  os.path.join(DIR_PATH, 'lib', 'antlr3'),
+  os.path.join(DIR_PATH, 'lib', 'django'),
+  os.path.join(DIR_PATH, 'lib', 'webob'),
+  os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
+]
+
+SCRIPT_EXCEPTIONS = {
+  "dev_appserver.py" : "dev_appserver_main.py"
+}
+
+def run_file(file_path, globals_):
+  """Execute the file at the specified path with the passed-in globals."""
+  sys.path = EXTRA_PATHS + sys.path
+  script_name = os.path.basename(file_path)
+  script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
+  script_path = os.path.join(SCRIPT_DIR, script_name)
+  execfile(script_path, globals_)
+
+if __name__ == '__main__':
+  run_file(__file__, globals())
--- a/thirdparty/google_appengine/dev_appserver.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/dev_appserver.py	Tue Jan 20 13:19:45 2009 +0000
@@ -38,6 +38,7 @@
 
 EXTRA_PATHS = [
   DIR_PATH,
+  os.path.join(DIR_PATH, 'lib', 'antlr3'),
   os.path.join(DIR_PATH, 'lib', 'django'),
   os.path.join(DIR_PATH, 'lib', 'webob'),
   os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
@@ -47,9 +48,13 @@
   "dev_appserver.py" : "dev_appserver_main.py"
 }
 
-if __name__ == '__main__':
+def run_file(file_path, globals_):
+  """Execute the file at the specified path with the passed-in globals."""
   sys.path = EXTRA_PATHS + sys.path
-  script_name = os.path.basename(__file__)
+  script_name = os.path.basename(file_path)
   script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
   script_path = os.path.join(SCRIPT_DIR, script_name)
-  execfile(script_path, globals())
+  execfile(script_path, globals_)
+
+if __name__ == '__main__':
+  run_file(__file__, globals())
--- a/thirdparty/google_appengine/google/appengine/api/api_base_pb.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/api_base_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -52,12 +52,6 @@
     if self.has_value_ and self.value_ != x.value_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_value_):
@@ -138,12 +132,6 @@
     if self.has_value_ and self.value_ != x.value_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_value_):
@@ -224,12 +212,6 @@
     if self.has_value_ and self.value_ != x.value_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_value_):
@@ -310,12 +292,6 @@
     if self.has_value_ and self.value_ != x.value_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_value_):
@@ -395,12 +371,6 @@
     if self.has_value_ and self.value_ != x.value_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_value_):
@@ -464,12 +434,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
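The __eq__/__ne__ pairs deleted above were identical in every generated message class; presumably they are now provided once by the shared ProtocolMessage base class (google/net/proto/ProtocolBuffer.py is also updated in this changeset). A hypothetical sketch of that shared definition:

    class ProtocolMessage(object):
      def __eq__(self, other):
        return (other is not None and other.__class__ == self.__class__
                and self.Equals(other))

      def __ne__(self, other):
        return not (self == other)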
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/apiproxy_rpc.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Base class for implementing RPC of API proxy stubs."""
+
+
+
+
+
+import sys
+
+
+class RPC(object):
+  """Base class for implementing RPC of API proxy stubs.
+
+  To implement an RPC that makes a real asynchronous API call:
+    - Extend this class.
+    - Override _MakeCallImpl and/or _WaitImpl to do a real asynchronous call.
+  """
+
+  IDLE = 0
+  RUNNING = 1
+  FINISHING = 2
+
+  def __init__(self, package=None, call=None, request=None, response=None,
+               callback=None, stub=None):
+    """Constructor for the RPC object.
+
+    All arguments are optional, and simply set members on the class.
+    These data members will be overridden by values passed to MakeCall.
+
+    Args:
+      package: string, the package for the call
+      call: string, the call within the package
+      request: ProtocolMessage instance, appropriate for the arguments
+      response: ProtocolMessage instance, appropriate for the response
+      callback: callable, called when call is complete
+      stub: APIProxyStub instance, used in default _WaitImpl to do real call
+    """
+    self.__exception = None
+    self.__state = RPC.IDLE
+    self.__traceback = None
+
+    self.package = package
+    self.call = call
+    self.request = request
+    self.response = response
+    self.callback = callback
+    self.stub = stub
+
+  def MakeCall(self, package=None, call=None, request=None, response=None,
+               callback=None):
+    """Makes an asynchronous (i.e. non-blocking) API call within the
+    specified package for the specified call method.
+
+    It calls _MakeCallImpl to do the real work.
+
+    Args:
+      Same as constructor; see __init__.
+
+    Raises:
+      TypeError or AssertionError if an argument is of an invalid type.
+      AssertionError or RuntimeError if the RPC is already in use.
+    """
+    self.callback = callback or self.callback
+    self.package = package or self.package
+    self.call = call or self.call
+    self.request = request or self.request
+    self.response = response or self.response
+
+    assert self.__state is RPC.IDLE, ('RPC for %s.%s has already been started' %
+                                      (self.package, self.call))
+    assert self.callback is None or callable(self.callback)
+
+    self._MakeCallImpl()
+
+  def Wait(self):
+    """Waits on the API call associated with this RPC."""
+    rpc_completed = self._WaitImpl()
+
+    assert rpc_completed, ('RPC for %s.%s was not completed, and no other '
+                           'exception was raised' % (self.package, self.call))
+
+  def CheckSuccess(self):
+    """If there was an exception, raise it now.
+
+    Raises:
+      Exception of the API call or the callback, if any.
+    """
+    if self.exception and self.__traceback:
+      raise self.exception.__class__, self.exception, self.__traceback
+    elif self.exception:
+      raise self.exception
+
+  @property
+  def exception(self):
+    return self.__exception
+
+  @property
+  def state(self):
+    return self.__state
+
+  def _MakeCallImpl(self):
+    """Override this method to implement a real asynchronous call rpc."""
+    self.__state = RPC.RUNNING
+
+  def _WaitImpl(self):
+    """Override this method to implement a real asynchronous call rpc.
+
+    Returns:
+      True if the async call was completed successfully.
+    """
+    try:
+      try:
+        self.stub.MakeSyncCall(self.package, self.call,
+                               self.request, self.response)
+      except Exception, e:
+        self.__exception = e
+    finally:
+      self.__state = RPC.FINISHING
+      self.__Callback()
+
+    return True
+
+  def __Callback(self):
+    if self.callback:
+      try:
+        self.callback()
+      except:
+        exc_class, self.__exception, self.__traceback = sys.exc_info()
+        self.__exception._appengine_apiproxy_rpc = self
+        raise
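As the RPC class docstring says, a real asynchronous transport subclasses RPC and overrides _MakeCallImpl and/or _WaitImpl. A minimal sketch (TimedRPC and its fields are hypothetical, not part of the SDK):

    import time
    from google.appengine.api import apiproxy_rpc

    class TimedRPC(apiproxy_rpc.RPC):
      """Toy subclass that records how long a call spends in flight."""

      def _MakeCallImpl(self):
        self._started = time.time()            # hypothetical field
        apiproxy_rpc.RPC._MakeCallImpl(self)   # base marks the RPC RUNNING

      def _WaitImpl(self):
        # The base implementation does a blocking stub.MakeSyncCall, moves
        # the RPC to FINISHING and runs the callback.
        done = apiproxy_rpc.RPC._WaitImpl(self)
        self.elapsed = time.time() - self._started
        return done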
--- a/thirdparty/google_appengine/google/appengine/api/apiproxy_stub.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/apiproxy_stub.py	Tue Jan 20 13:19:45 2009 +0000
@@ -21,7 +21,7 @@
 
 
 
-
+from google.appengine.api import apiproxy_rpc
 from google.appengine.runtime import apiproxy_errors
 
 
@@ -49,6 +49,14 @@
     self.__service_name = service_name
     self.__max_request_size = max_request_size
 
+  def CreateRPC(self):
+    """Creates RPC object instance.
+
+    Returns:
+      An instance of RPC.
+    """
+    return apiproxy_rpc.RPC(stub=self)
+
   def MakeSyncCall(self, service, call, request, response):
     """The main RPC entry point.
 
--- a/thirdparty/google_appengine/google/appengine/api/apiproxy_stub_map.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/apiproxy_stub_map.py	Tue Jan 20 13:19:45 2009 +0000
@@ -27,10 +27,34 @@
 
 
 
+import inspect
 import sys
 
+def CreateRPC(service):
+  """Creates a RPC instance for the given service.
+
+  The instance is suitable for talking to remote services.
+  Each RPC instance can be used only once, and should not be reused.
+
+  Args:
+    service: string representing which service to call.
+
+  Returns:
+    the rpc object.
+
+  Raises:
+    AssertionError or RuntimeError if the stub for service doesn't supply a
+    CreateRPC method.
+  """
+  stub = apiproxy.GetStub(service)
+  assert stub, 'No api proxy found for service "%s"' % service
+  assert hasattr(stub, 'CreateRPC'), ('The service "%s" doesn\'t have '
+                                      'a CreateRPC method.' % service)
+  return stub.CreateRPC()
+
+
 def MakeSyncCall(service, call, request, response):
-  """The APIProxy entry point.
+  """The APIProxy entry point for a synchronous API call.
 
   Args:
     service: string representing which service to call
@@ -41,9 +65,99 @@
   Raises:
     apiproxy_errors.Error or a subclass.
   """
-  stub = apiproxy.GetStub(service)
-  assert stub, 'No api proxy found for service "%s"' % service
-  stub.MakeSyncCall(service, call, request, response)
+  apiproxy.MakeSyncCall(service, call, request, response)
+
+
+class ListOfHooks(object):
+  """An ordered collection of hooks for a particular API call.
+
+  A hook is a function that has exactly the same signature as
+  a service stub. It will be called before or after an api hook is
+  executed, depending on whether this list is for precall or postcall hooks.
+  Hooks can be used for debugging purposes (check certain
+  pre- or postconditions on api calls) or to apply patches to protocol
+  buffers before/after a call gets submitted.
+  """
+
+  def __init__(self):
+    """Constructor."""
+
+    self.__content = []
+
+    self.__unique_keys = set()
+
+  def __len__(self):
+    """Returns the amount of elements in the collection."""
+    return self.__content.__len__()
+
+  def __Insert(self, index, key, function, service=None):
+    """Appends a hook at a certain position in the list.
+
+    Args:
+      index: the index of where to insert the function
+      key: a unique key (within the module) for this particular function.
+        If something from the same module with the same key is already
+        registered, nothing will be added.
+      function: the hook to be added.
+      service: optional argument that restricts the hook to a particular api
+
+    Returns:
+      True if the collection was modified.
+    """
+    unique_key = (key, inspect.getmodule(function))
+    if unique_key in self.__unique_keys:
+      return False
+    self.__content.insert(index, (key, function, service))
+    self.__unique_keys.add(unique_key)
+    return True
+
+  def Append(self, key, function, service=None):
+    """Appends a hook at the end of the list.
+
+    Args:
+      key: a unique key (within the module) for this particular function.
+        If something from the same module with the same key is already
+        registered, nothing will be added.
+      function: the hook to be added.
+      service: optional argument that restricts the hook to a particular api
+
+    Returns:
+      True if the collection was modified.
+    """
+    return self.__Insert(len(self), key, function, service)
+
+  def Push(self, key, function, service=None):
+    """Inserts a hook at the beginning of the list.
+
+    Args:
+      key: a unique key (within the module) for this particular function.
+        If something from the same module with the same key is already
+        registered, nothing will be added.
+      function: the hook to be added.
+      service: optional argument that restricts the hook to a particular api
+
+    Returns:
+      True if the collection was modified.
+    """
+    return self.__Insert(0, key, function, service)
+
+  def Clear(self):
+    """Removes all hooks from the list (useful for unit tests)."""
+    self.__content = []
+    self.__unique_keys = set()
+
+  def Call(self, service, call, request, response):
+    """Invokes all hooks in this collection.
+
+    Args:
+      service: string representing which service to call
+      call: string representing which function to call
+      request: protocol buffer for the request
+      response: protocol buffer for the response
+    """
+    for key, function, srv in self.__content:
+      if srv is None or srv == service:
+        function(service, call, request, response)
 
 
 class APIProxyStubMap:
@@ -70,6 +184,16 @@
     """
     self.__stub_map = {}
     self.__default_stub = default_stub
+    self.__precall_hooks = ListOfHooks()
+    self.__postcall_hooks = ListOfHooks()
+
+  def GetPreCallHooks(self):
+    """Gets a collection for all precall hooks."""
+    return self.__precall_hooks
+
+  def GetPostCallHooks(self):
+    """Gets a collection for all precall hooks."""
+    return self.__postcall_hooks
 
   def RegisterStub(self, service, stub):
     """Register the provided stub for the specified service.
@@ -98,6 +222,25 @@
     """
     return self.__stub_map.get(service, self.__default_stub)
 
+  def MakeSyncCall(self, service, call, request, response):
+    """The APIProxy entry point.
+
+    Args:
+      service: string representing which service to call
+      call: string representing which function to call
+      request: protocol buffer for the request
+      response: protocol buffer for the response
+
+    Raises:
+      apiproxy_errors.Error or a subclass.
+    """
+    stub = self.GetStub(service)
+    assert stub, 'No api proxy found for service "%s"' % service
+    self.__precall_hooks.Call(service, call, request, response)
+    stub.MakeSyncCall(service, call, request, response)
+    self.__postcall_hooks.Call(service, call, request, response)
+
+
 def GetDefaultAPIProxy():
   try:
     runtime = __import__('google.appengine.runtime', globals(), locals(),
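Taken together, the hook collections and CreateRPC support patterns like the following sketch. It assumes stubs are already registered (as under dev_appserver) and that req and resp are placeholders for the request/response protocol buffers of the chosen call:

    from google.appengine.api import apiproxy_stub_map

    def trace_call(service, call, request, response):
      # A hook has exactly the same signature as a stub method.
      print 'API call: %s.%s' % (service, call)

    # Register for all services; pass service='datastore_v3' to restrict.
    apiproxy_stub_map.apiproxy.GetPreCallHooks().Append('trace', trace_call)

    # Synchronous call: pre- and postcall hooks fire around the stub.
    apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Get', req, resp)

    # Asynchronous call through the new RPC plumbing. Note that the default
    # RPC._WaitImpl calls the stub directly, bypassing the hooks.
    rpc = apiproxy_stub_map.CreateRPC('datastore_v3')
    rpc.MakeCall('datastore_v3', 'Get', req, resp)
    rpc.Wait()
    rpc.CheckSuccess()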
--- a/thirdparty/google_appengine/google/appengine/api/appinfo.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/appinfo.py	Tue Jan 20 13:19:45 2009 +0000
@@ -54,6 +54,8 @@
 APPLICATION_RE_STRING = r'(?!-)[a-z\d\-]{1,%d}' % APP_ID_MAX_LEN
 VERSION_RE_STRING = r'(?!-)[a-z\d\-]{1,%d}' % MAJOR_VERSION_ID_MAX_LEN
 
+RUNTIME_RE_STRING = r'[a-z]{1,30}'
+
 HANDLER_STATIC_FILES = 'static_files'
 HANDLER_STATIC_DIR = 'static_dir'
 HANDLER_SCRIPT = 'script'
@@ -66,8 +68,6 @@
 SECURE_HTTPS = 'always'
 SECURE_HTTP_OR_HTTPS = 'optional'
 
-RUNTIME_PYTHON = 'python'
-
 DEFAULT_SKIP_FILES = (r"^(.*/)?("
                       r"(app\.yaml)|"
                       r"(app\.yml)|"
@@ -304,7 +304,7 @@
 
     APPLICATION: APPLICATION_RE_STRING,
     VERSION: VERSION_RE_STRING,
-    RUNTIME: validation.Options(RUNTIME_PYTHON),
+    RUNTIME: RUNTIME_RE_STRING,
 
 
     API_VERSION: validation.Options('1', 'beta'),
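The runtime field is now validated against a pattern instead of the fixed Options('python') list, so app.yaml files naming future runtimes pass validation. A quick check of what the pattern admits (plain re semantics, not SDK API):

    import re
    RUNTIME_RE_STRING = r'[a-z]{1,30}'
    assert re.match(RUNTIME_RE_STRING + '$', 'python')
    assert re.match(RUNTIME_RE_STRING + '$', 'java')
    assert not re.match(RUNTIME_RE_STRING + '$', 'Python27')  # no caps/digits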
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/capabilities/__init__.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Allows applications to identify API outages and scheduled downtime.
+
+Some examples:
+  def StoreUploadedProfileImage(self):
+    uploaded_image = self.request.get('img')
+    # If the images API is unavailable, we'll just skip the resize.
+    if CapabilitySet('images').is_enabled():
+      uploaded_image = images.resize(uploaded_image, 64, 64)
+    store(uploaded_image)
+
+  def RenderHTMLForm(self):
+    datastore_readonly = CapabilitySet('datastore_v3', capabilities=['write'])
+    if datastore_readonly.may_be_disabled_in(60):
+      # self.response.out('<p>Not accepting submissions right now: %s</p>' %
+                          datastore_readonly.admin_message())
+      # ...render form with form elements disabled...
+    else:
+      # ...render form normally...
+
+  Individual API wrapper modules should expose CapabilitySet objects
+  for users rather than relying on users to create them.  They may
+  also create convenience methods (e.g. db.IsReadOnly()) that delegate
+  to the relevant CapabilitySet.
+
+Classes defined here:
+  CapabilitySet: encapsulates one or more capabilities, allows introspection.
+  UnknownCapabilityError: thrown when an unknown capability is requested.
+"""
+
+
+
+
+
+from google.appengine.api.capabilities import capability_service_pb
+from google.appengine.base import capabilities_pb
+from google.appengine.api import apiproxy_stub_map
+
+
+IsEnabledRequest  = capability_service_pb.IsEnabledRequest
+IsEnabledResponse = capability_service_pb.IsEnabledResponse
+CapabilityConfig  = capabilities_pb.CapabilityConfig
+
+
+class UnknownCapabilityError(Exception):
+  """An unknown capability was requested."""
+
+
+class CapabilitySet(object):
+  """Encapsulates one or more capabilities.
+
+  Capabilities can either be named explicitly, or inferred from the
+  list of methods provided.  If no capabilities or methods are
+  provided, this will check whether the entire package is enabled.
+  """
+  def __init__(self, package, capabilities=None, methods=None,
+               stub_map=apiproxy_stub_map):
+    """Constructor.
+
+    Args:
+      package: string, the name of the package to check
+      capabilities: list of strings
+      methods: list of strings
+      stub_map: stub map to use when making the IsEnabled call
+    """
+    if capabilities is None:
+      capabilities = []
+    if methods is None:
+      methods = []
+    self._package = package
+    self._capabilities = ['*'] + capabilities
+    self._methods = methods
+    self._stub_map = stub_map
+
+  def is_enabled(self):
+    """Tests whether the capabilities is currently enabled.
+
+    Returns:
+      True if API calls that require these capabillities will succeed.
+
+    Raises:
+      UnknownCapabilityError, if a specified capability was not recognized.
+    """
+    config = self._get_status()
+    return config.summary_status() in (IsEnabledResponse.ENABLED,
+                                       IsEnabledResponse.SCHEDULED_FUTURE,
+                                       IsEnabledResponse.SCHEDULED_NOW)
+
+  def will_remain_enabled_for(self, time=60):
+    """Returns true if it will remain enabled for the specified amount of time.
+
+    Args:
+      time: Number of seconds in the future to look when checking for scheduled
+        downtime.
+
+    Returns:
+      True if there is no scheduled downtime for the specified capability
+      within the amount of time specified.
+
+    Raises:
+      UnknownCapabilityError, if a specified capability was not recognized.
+    """
+    config = self._get_status()
+
+    status = config.summary_status()
+    if status == IsEnabledResponse.ENABLED:
+      return True
+    elif status == IsEnabledResponse.SCHEDULED_NOW:
+      return False
+    elif status == IsEnabledResponse.SCHEDULED_FUTURE:
+      if config.has_time_until_scheduled():
+        return config.time_until_scheduled() >= time
+      else:
+        return True
+    elif status == IsEnabledResponse.DISABLED:
+      return False
+    else:
+      return False
+
+  def admin_message(self):
+    """Get any administrator notice messages for these capabilities.
+
+    Returns:
+      A string containing one or more admin messages, or an empty string.
+
+    Raises:
+      UnknownCapabilityError, if a specified capability was not recognized.
+    """
+    message_list = []
+    for config in self._get_status().config_list():
+      message = config.admin_message()
+      if message and message not in message_list:
+        message_list.append(message)
+    return '  '.join(message_list)
+
+  def _get_status(self):
+    """Get an IsEnabledResponse for the capabilities listed.
+
+    Returns:
+      IsEnabledResponse for the specified capabilities.
+
+    Raises:
+      UnknownCapabilityError: If an unknown capability was requested.
+    """
+    req = IsEnabledRequest()
+    req.set_package(self._package)
+    for capability in self._capabilities:
+      req.add_capability(capability)
+    for method in self._methods:
+      req.add_call(method)
+
+    resp = capability_service_pb.IsEnabledResponse()
+    self._stub_map.MakeSyncCall('capability_service', 'IsEnabled', req, resp)
+
+    if resp.summary_status() == IsEnabledResponse.UNKNOWN:
+      raise UnknownCapabilityError()
+
+    return resp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/capabilities/capability_service_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,361 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.net.proto import ProtocolBuffer
+import array
+import dummy_thread as thread
+
+__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
+                   unusednames=printElemNumber,debug_strs no-special"""
+
+from google.appengine.base.capabilities_pb import CapabilityConfig
+class IsEnabledRequest(ProtocolBuffer.ProtocolMessage):
+  has_package_ = 0
+  package_ = ""
+
+  def __init__(self, contents=None):
+    self.capability_ = []
+    self.call_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def package(self): return self.package_
+
+  def set_package(self, x):
+    self.has_package_ = 1
+    self.package_ = x
+
+  def clear_package(self):
+    self.has_package_ = 0
+    self.package_ = ""
+
+  def has_package(self): return self.has_package_
+
+  def capability_size(self): return len(self.capability_)
+  def capability_list(self): return self.capability_
+
+  def capability(self, i):
+    return self.capability_[i]
+
+  def set_capability(self, i, x):
+    self.capability_[i] = x
+
+  def add_capability(self, x):
+    self.capability_.append(x)
+
+  def clear_capability(self):
+    self.capability_ = []
+
+  def call_size(self): return len(self.call_)
+  def call_list(self): return self.call_
+
+  def call(self, i):
+    return self.call_[i]
+
+  def set_call(self, i, x):
+    self.call_[i] = x
+
+  def add_call(self, x):
+    self.call_.append(x)
+
+  def clear_call(self):
+    self.call_ = []
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_package()): self.set_package(x.package())
+    for i in xrange(x.capability_size()): self.add_capability(x.capability(i))
+    for i in xrange(x.call_size()): self.add_call(x.call(i))
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_package_ != x.has_package_: return 0
+    if self.has_package_ and self.package_ != x.package_: return 0
+    if len(self.capability_) != len(x.capability_): return 0
+    for e1, e2 in zip(self.capability_, x.capability_):
+      if e1 != e2: return 0
+    if len(self.call_) != len(x.call_): return 0
+    for e1, e2 in zip(self.call_, x.call_):
+      if e1 != e2: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_package_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: package not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.package_))
+    n += 1 * len(self.capability_)
+    for i in xrange(len(self.capability_)): n += self.lengthString(len(self.capability_[i]))
+    n += 1 * len(self.call_)
+    for i in xrange(len(self.call_)): n += self.lengthString(len(self.call_[i]))
+    return n + 1
+
+  def Clear(self):
+    self.clear_package()
+    self.clear_capability()
+    self.clear_call()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putPrefixedString(self.package_)
+    for i in xrange(len(self.capability_)):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.capability_[i])
+    for i in xrange(len(self.call_)):
+      out.putVarInt32(26)
+      out.putPrefixedString(self.call_[i])
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_package(d.getPrefixedString())
+        continue
+      if tt == 18:
+        self.add_capability(d.getPrefixedString())
+        continue
+      if tt == 26:
+        self.add_call(d.getPrefixedString())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_package_: res+=prefix+("package: %s\n" % self.DebugFormatString(self.package_))
+    cnt=0
+    for e in self.capability_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("capability%s: %s\n" % (elm, self.DebugFormatString(e)))
+      cnt+=1
+    cnt=0
+    for e in self.call_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("call%s: %s\n" % (elm, self.DebugFormatString(e)))
+      cnt+=1
+    return res
+
+  kpackage = 1
+  kcapability = 2
+  kcall = 3
+
+  _TEXT = (
+   "ErrorCode",
+   "package",
+   "capability",
+   "call",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class IsEnabledResponse(ProtocolBuffer.ProtocolMessage):
+
+  ENABLED      =    1
+  SCHEDULED_FUTURE =    2
+  SCHEDULED_NOW =    3
+  DISABLED     =    4
+  UNKNOWN      =    5
+
+  _SummaryStatus_NAMES = {
+    1: "ENABLED",
+    2: "SCHEDULED_FUTURE",
+    3: "SCHEDULED_NOW",
+    4: "DISABLED",
+    5: "UNKNOWN",
+  }
+
+  def SummaryStatus_Name(cls, x): return cls._SummaryStatus_NAMES.get(x, "")
+  SummaryStatus_Name = classmethod(SummaryStatus_Name)
+
+  has_summary_status_ = 0
+  summary_status_ = 0
+  has_time_until_scheduled_ = 0
+  time_until_scheduled_ = 0
+
+  def __init__(self, contents=None):
+    self.config_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def summary_status(self): return self.summary_status_
+
+  def set_summary_status(self, x):
+    self.has_summary_status_ = 1
+    self.summary_status_ = x
+
+  def clear_summary_status(self):
+    self.has_summary_status_ = 0
+    self.summary_status_ = 0
+
+  def has_summary_status(self): return self.has_summary_status_
+
+  def time_until_scheduled(self): return self.time_until_scheduled_
+
+  def set_time_until_scheduled(self, x):
+    self.has_time_until_scheduled_ = 1
+    self.time_until_scheduled_ = x
+
+  def clear_time_until_scheduled(self):
+    self.has_time_until_scheduled_ = 0
+    self.time_until_scheduled_ = 0
+
+  def has_time_until_scheduled(self): return self.has_time_until_scheduled_
+
+  def config_size(self): return len(self.config_)
+  def config_list(self): return self.config_
+
+  def config(self, i):
+    return self.config_[i]
+
+  def mutable_config(self, i):
+    return self.config_[i]
+
+  def add_config(self):
+    x = CapabilityConfig()
+    self.config_.append(x)
+    return x
+
+  def clear_config(self):
+    self.config_ = []
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_summary_status()): self.set_summary_status(x.summary_status())
+    if (x.has_time_until_scheduled()): self.set_time_until_scheduled(x.time_until_scheduled())
+    for i in xrange(x.config_size()): self.add_config().CopyFrom(x.config(i))
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_summary_status_ != x.has_summary_status_: return 0
+    if self.has_summary_status_ and self.summary_status_ != x.summary_status_: return 0
+    if self.has_time_until_scheduled_ != x.has_time_until_scheduled_: return 0
+    if self.has_time_until_scheduled_ and self.time_until_scheduled_ != x.time_until_scheduled_: return 0
+    if len(self.config_) != len(x.config_): return 0
+    for e1, e2 in zip(self.config_, x.config_):
+      if e1 != e2: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_summary_status_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: summary_status not set.')
+    for p in self.config_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthVarInt64(self.summary_status_)
+    if (self.has_time_until_scheduled_): n += 1 + self.lengthVarInt64(self.time_until_scheduled_)
+    n += 1 * len(self.config_)
+    for i in xrange(len(self.config_)): n += self.lengthString(self.config_[i].ByteSize())
+    return n + 1
+
+  def Clear(self):
+    self.clear_summary_status()
+    self.clear_time_until_scheduled()
+    self.clear_config()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(8)
+    out.putVarInt32(self.summary_status_)
+    if (self.has_time_until_scheduled_):
+      out.putVarInt32(16)
+      out.putVarInt64(self.time_until_scheduled_)
+    for i in xrange(len(self.config_)):
+      out.putVarInt32(26)
+      out.putVarInt32(self.config_[i].ByteSize())
+      self.config_[i].OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 8:
+        self.set_summary_status(d.getVarInt32())
+        continue
+      if tt == 16:
+        self.set_time_until_scheduled(d.getVarInt64())
+        continue
+      if tt == 26:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.add_config().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_summary_status_: res+=prefix+("summary_status: %s\n" % self.DebugFormatInt32(self.summary_status_))
+    if self.has_time_until_scheduled_: res+=prefix+("time_until_scheduled: %s\n" % self.DebugFormatInt64(self.time_until_scheduled_))
+    cnt=0
+    for e in self.config_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("config%s <\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+      cnt+=1
+    return res
+
+  ksummary_status = 1
+  ktime_until_scheduled = 2
+  kconfig = 3
+
+  _TEXT = (
+   "ErrorCode",
+   "summary_status",
+   "time_until_scheduled",
+   "config",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+
+__all__ = ['IsEnabledRequest','IsEnabledResponse']
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/capabilities/capability_stub.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Stub version of the capability service API, everything is always enabled."""
+
+
+
+from google.appengine.api import apiproxy_stub
+from google.appengine.api import capabilities
+
+IsEnabledRequest = capabilities.IsEnabledRequest
+IsEnabledResponse = capabilities.IsEnabledResponse
+CapabilityConfig = capabilities.CapabilityConfig
+
+class CapabilityServiceStub(apiproxy_stub.APIProxyStub):
+  """Python only capability service stub."""
+
+  def __init__(self, service_name='capability_service'):
+    """Constructor.
+
+    Args:
+      service_name: Service name expected for all calls.
+    """
+    super(CapabilityServiceStub, self).__init__(service_name)
+
+
+  def _Dynamic_IsEnabled(self, request, response):
+    """Implementation of CapabilityService::IsEnabled().
+
+    Args:
+      request: An IsEnabledRequest.
+      response: An IsEnabledResponse.
+    """
+    response.set_summary_status(IsEnabledResponse.ENABLED)
+
+    default_config = response.add_config()
+    default_config.set_package('')
+    default_config.set_capability('')
+    default_config.set_status(CapabilityConfig.ENABLED)
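A sketch of the stub and CapabilitySet working together, assuming the default stub map:

    from google.appengine.api import apiproxy_stub_map, capabilities
    from google.appengine.api.capabilities import capability_stub

    apiproxy_stub_map.apiproxy.RegisterStub(
        'capability_service', capability_stub.CapabilityServiceStub())

    datastore_write = capabilities.CapabilitySet('datastore_v3',
                                                 capabilities=['write'])
    assert datastore_write.is_enabled()             # stub always says ENABLED
    assert datastore_write.will_remain_enabled_for(60)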
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/croninfo.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""CronInfo tools.
+
+A library for working with CronInfo records, describing cron entries for an
+application. Supports loading the records from yaml.
+"""
+
+
+
+from google.appengine.cron import groc
+from google.appengine.api import validation
+from google.appengine.api import yaml_builder
+from google.appengine.api import yaml_listener
+from google.appengine.api import yaml_object
+
+_URL_REGEX = r'^/.*$'
+
+
+_TIMEZONE_REGEX = r'^.{0,100}$'
+
+_DESCRIPTION_REGEX = r'^.{0,499}$'
+
+
+class GrocValidator(validation.Validator):
+  """Checks that a schedule is in valid groc format."""
+
+  def Validate(self, value):
+    """Validates a schedule."""
+    if value is None:
+      raise validation.MissingAttribute('schedule must be specified')
+    if not isinstance(value, basestring):
+      raise TypeError('schedule must be a string, not \'%r\'' % type(value))
+    schedule = groc.CreateParser(value)
+    try:
+      schedule.timespec()
+    except groc.GrocException, e:
+      raise validation.ValidationError('schedule \'%s\' failed to parse: %s' % (
+          value, e.args[0]))
+    return value
+
+
+CRON = 'cron'
+
+URL = 'url'
+SCHEDULE = 'schedule'
+TIMEZONE = 'timezone'
+DESCRIPTION = 'description'
+
+
+class MalformedCronfigurationFile(Exception):
+  """Configuration file for Cron is malformed."""
+  pass
+
+
+class CronEntry(validation.Validated):
+  """A cron entry describes a single cron job."""
+  ATTRIBUTES = {
+      URL: _URL_REGEX,
+      SCHEDULE: GrocValidator(),
+      TIMEZONE: validation.Optional(_TIMEZONE_REGEX),
+      DESCRIPTION: validation.Optional(_DESCRIPTION_REGEX)
+  }
+
+
+class CronInfoExternal(validation.Validated):
+  """CronInfoExternal describes all cron entries for an application."""
+  ATTRIBUTES = {
+      CRON: validation.Optional(validation.Repeated(CronEntry))
+  }
+
+
+def LoadSingleCron(cron_info):
+  """Load a cron.yaml file or string and return a CronInfoExternal object."""
+  builder = yaml_object.ObjectBuilder(CronInfoExternal)
+  handler = yaml_builder.BuilderHandler(builder)
+  listener = yaml_listener.EventListener(handler)
+  listener.Parse(cron_info)
+
+  cron_info = handler.GetResults()
+  if len(cron_info) < 1:
+    raise MalformedCronfigurationFile('Empty cron configuration.')
+  if len(cron_info) > 1:
+    raise MalformedCronfigurationFile('Multiple cron sections '
+                                      'in configuration.')
+  return cron_info[0]
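A sketch of loading a cron configuration with the new module (the YAML document here is a made-up example):

    from google.appengine.api import croninfo

    CRON_YAML = '''cron:
    - url: /tasks/summary
      schedule: every 24 hours
      description: daily summary job
    '''

    info = croninfo.LoadSingleCron(CRON_YAML)
    print info.cron[0].url        # -> /tasks/summary
    print info.cron[0].schedule   # validated by GrocValidator at load time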
--- a/thirdparty/google_appengine/google/appengine/api/datastore.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/datastore.py	Tue Jan 20 13:19:45 2009 +0000
@@ -257,7 +257,7 @@
   if tx:
     tx.RecordModifiedKeys(keys)
 
-  resp = api_base_pb.VoidProto()
+  resp = datastore_pb.DeleteResponse()
   try:
     apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Delete', req, resp)
   except apiproxy_errors.ApplicationError, err:
@@ -1089,7 +1089,8 @@
           'Inequality operators (%s) must be on the same property as the '
           'first sort order, if any sort orders are supplied' %
           ', '.join(self.INEQUALITY_OPERATORS))
-    elif property in datastore_types._SPECIAL_PROPERTIES:
+
+    if property in datastore_types._SPECIAL_PROPERTIES:
       if property == datastore_types._KEY_SPECIAL_PROPERTY:
         for value in values:
           if not isinstance(value, Key):
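The check above means filters on the __key__ pseudo-property must compare against Key instances. A sketch against the low-level datastore API:

    from google.appengine.api import datastore
    from google.appengine.api.datastore_types import Key

    q = datastore.Query('Person')
    q['__key__ >'] = Key.from_path('Person', 'alice')   # a Key, not a str
    results = q.Get(10)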
--- a/thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py	Tue Jan 20 13:19:45 2009 +0000
@@ -69,7 +69,11 @@
 _MAXIMUM_RESULTS = 1000
 
 
-_MAX_QUERY_OFFSET = 4000
+_MAX_QUERY_OFFSET = 1000
+
+
+_MAX_QUERY_COMPONENTS = 100
+
 
 class _StoredEntity(object):
   """Simple wrapper around an entity stored by the stub.
@@ -221,6 +225,16 @@
     if app_kind in self.__schema_cache:
       del self.__schema_cache[app_kind]
 
+  READ_PB_EXCEPTIONS = (ProtocolBuffer.ProtocolBufferDecodeError, LookupError,
+                        TypeError, ValueError)
+  READ_ERROR_MSG = ('Data in %s is corrupt or a different version. '
+                    'Try running with the --clear_datastore flag.\n%r')
+  READ_PY250_MSG = ('Are you using FloatProperty and/or GeoPtProperty? '
+                    'Unfortunately loading float values from the datastore '
+                    'file does not work with Python 2.5.0. '
+                    'Please upgrade to a newer Python 2.5 release or use '
+                    'the --clear_datastore flag.\n')
+
   def Read(self):
     """ Reads the datastore and history files into memory.
 
@@ -235,18 +249,21 @@
 
     Also sets __next_id to one greater than the highest id allocated so far.
     """
-    pb_exceptions = (ProtocolBuffer.ProtocolBufferDecodeError, LookupError,
-                     TypeError, ValueError)
-    error_msg = ('Data in %s is corrupt or a different version. '
-                 'Try running with the --clear_datastore flag.\n%r')
-
     if self.__datastore_file and self.__datastore_file != '/dev/null':
       for encoded_entity in self.__ReadPickled(self.__datastore_file):
         try:
           entity = entity_pb.EntityProto(encoded_entity)
-        except pb_exceptions, e:
-          raise datastore_errors.InternalError(error_msg %
+        except self.READ_PB_EXCEPTIONS, e:
+          raise datastore_errors.InternalError(self.READ_ERROR_MSG %
                                                (self.__datastore_file, e))
+        except struct.error, e:
+          if (sys.version_info[0:3] == (2, 5, 0)
+              and e.message.startswith('unpack requires a string argument')):
+            raise datastore_errors.InternalError(self.READ_PY250_MSG +
+                                                 self.READ_ERROR_MSG %
+                                                 (self.__datastore_file, e))
+          else:
+            raise
 
         self._StoreEntity(entity)
 
@@ -258,8 +275,8 @@
       for encoded_query, count in self.__ReadPickled(self.__history_file):
         try:
           query_pb = datastore_pb.Query(encoded_query)
-        except pb_exceptions, e:
-          raise datastore_errors.InternalError(error_msg %
+        except self.READ_PB_EXCEPTIONS, e:
+          raise datastore_errors.InternalError(self.READ_ERROR_MSG %
                                                (self.__history_file, e))
 
         if query_pb in self.__query_history:
@@ -439,13 +456,22 @@
   def _Dynamic_RunQuery(self, query, query_result):
     if not self.__tx_lock.acquire(False):
       raise apiproxy_errors.ApplicationError(
-        datastore_pb.Error.BAD_REQUEST, "Can't query inside a transaction.")
+          datastore_pb.Error.BAD_REQUEST, 'Can\'t query inside a transaction.')
     else:
       self.__tx_lock.release()
 
     if query.has_offset() and query.offset() > _MAX_QUERY_OFFSET:
-       raise apiproxy_errors.ApplicationError(
-         datastore_pb.Error.BAD_REQUEST, "Too big query offset.")
+      raise apiproxy_errors.ApplicationError(
+          datastore_pb.Error.BAD_REQUEST, 'Too big query offset.')
+
+    num_components = len(query.filter_list()) + len(query.order_list())
+    if query.has_ancestor():
+      num_components += 1
+    if num_components > _MAX_QUERY_COMPONENTS:
+      raise apiproxy_errors.ApplicationError(
+          datastore_pb.Error.BAD_REQUEST,
+          ('query is too large. may not have more than %s filters'
+           ' + sort orders ancestor total' % _MAX_QUERY_COMPONENTS))
 
     app = query.app()
 
--- a/thirdparty/google_appengine/google/appengine/api/datastore_types.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/datastore_types.py	Tue Jan 20 13:19:45 2009 +0000
@@ -867,12 +867,45 @@
     encoded = base64.urlsafe_b64encode(self)
     return saxutils.escape(encoded)
 
+class ByteString(str):
+  """A byte-string type, appropriate for storing short amounts of indexed data.
+
+  This behaves identically to Blob, except it's used only for short, indexed
+  byte strings.
+  """
+
+  def __new__(cls, arg=None):
+    """Constructor.
+
+    We only accept str instances.
+
+    Args:
+      arg: optional str instance (default '')
+    """
+    if arg is None:
+      arg = ''
+    if isinstance(arg, str):
+      return super(ByteString, cls).__new__(cls, arg)
+
+    raise TypeError('ByteString() argument should be str instance, not %s' %
+                    type(arg).__name__)
+
+  def ToXml(self):
+    """Output a ByteString as XML.
+
+    Returns:
+      Base64 encoded version of itself for safe insertion in to an XML document.
+    """
+    encoded = base64.urlsafe_b64encode(self)
+    return saxutils.escape(encoded)
+
 
 _PROPERTY_MEANINGS = {
 
 
 
   Blob:              entity_pb.Property.BLOB,
+  ByteString:        entity_pb.Property.BYTESTRING,
   Text:              entity_pb.Property.TEXT,
   datetime.datetime: entity_pb.Property.GD_WHEN,
   Category:          entity_pb.Property.ATOM_CATEGORY,
@@ -887,6 +920,7 @@
 
 _PROPERTY_TYPES = frozenset([
   Blob,
+  ByteString,
   bool,
   Category,
   datetime.datetime,
@@ -997,6 +1031,7 @@
 
 _VALIDATE_PROPERTY_VALUES = {
   Blob: ValidatePropertyNothing,
+  ByteString: ValidatePropertyString,
   bool: ValidatePropertyNothing,
   Category: ValidatePropertyString,
   datetime.datetime: ValidatePropertyNothing,
@@ -1193,6 +1228,7 @@
 
 _PACK_PROPERTY_VALUES = {
   Blob: PackBlob,
+  ByteString: PackBlob,
   bool: PackBool,
   Category: PackString,
   datetime.datetime: PackDatetime,
@@ -1305,6 +1341,7 @@
   entity_pb.Property.GD_POSTALADDRESS:  PostalAddress,
   entity_pb.Property.GD_RATING:         Rating,
   entity_pb.Property.BLOB:              Blob,
+  entity_pb.Property.BYTESTRING:        ByteString,
   entity_pb.Property.TEXT:              Text,
 }
 
@@ -1324,7 +1361,7 @@
 
   if pbval.has_stringvalue():
     value = pbval.stringvalue()
-    if meaning != entity_pb.Property.BLOB:
+    if meaning not in (entity_pb.Property.BLOB, entity_pb.Property.BYTESTRING):
       value = unicode(value.decode('utf-8'))
   elif pbval.has_int64value():
     value = long(pbval.int64value())
@@ -1388,6 +1425,7 @@
     'float':            float,
     'key':              Key,
     'blob':             Blob,
+    'bytestring':       ByteString,
     'text':             Text,
     'user':             users.User,
     'atom:category':    Category,
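A sketch of the new type's contract: ByteString subclasses str and accepts only byte strings, which is why it is exempted from the unicode decode path above.

    from google.appengine.api import datastore_types

    bs = datastore_types.ByteString('\x00\xffshort binary payload')
    assert isinstance(bs, str)

    try:
      datastore_types.ByteString(u'unicode is rejected')
    except TypeError:
      pass   # only str instances are accepted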
--- a/thirdparty/google_appengine/google/appengine/api/images/__init__.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/images/__init__.py	Tue Jan 20 13:19:45 2009 +0000
@@ -30,6 +30,8 @@
 
 
 
+import struct
+
 from google.appengine.api import apiproxy_stub_map
 from google.appengine.api.images import images_service_pb
 from google.appengine.runtime import apiproxy_errors
@@ -83,6 +85,8 @@
     self._image_data = image_data
     self._transforms = []
     self._transform_map = {}
+    self._width = None
+    self._height = None
 
   def _check_transform_limits(self, transform):
     """Ensure some simple limits on the number of transforms allowed.
@@ -104,6 +108,175 @@
                             "requested on this image." % transform_name)
     self._transform_map[transform] = True
 
+  def _update_dimensions(self):
+    """Updates the width and height fields of the image.
+
+    Raises:
+      NotImageError if the image data is not an image.
+      BadImageError if the image data is corrupt.
+    """
+    size = len(self._image_data)
+    if size >= 6 and self._image_data.startswith("GIF"):
+      self._update_gif_dimensions()
+    elif size >= 8 and self._image_data.startswith("\x89PNG\x0D\x0A\x1A\x0A"):
+      self._update_png_dimensions()
+    elif size >= 2 and self._image_data.startswith("\xff\xD8"):
+      self._update_jpeg_dimensions()
+    elif (size >= 8 and (self._image_data.startswith("II\x2a\x00") or
+                         self._image_data.startswith("MM\x00\x2a"))):
+      self._update_tiff_dimensions()
+    elif size >= 2 and self._image_data.startswith("BM"):
+      self._update_bmp_dimensions()
+    elif size >= 4 and self._image_data.startswith("\x00\x00\x01\x00"):
+      self._update_ico_dimensions()
+    else:
+      raise NotImageError("Unrecognized image format")
+
+  def _update_gif_dimensions(self):
+    """Updates the width and height fields of the gif image.
+
+    Raises:
+      BadImageError if the image string is not a valid gif image.
+    """
+    size = len(self._image_data)
+    if size >= 10:
+      self._width, self._height = struct.unpack("<HH", self._image_data[6:10])
+    else:
+      raise BadImageError("Corrupt GIF format")
+
+  def _update_png_dimensions(self):
+    """Updates the width and height fields of the png image.
+
+    Raises:
+      BadImageError if the image string is not a valid png image.
+    """
+    size = len(self._image_data)
+    if size >= 24 and self._image_data[12:16] == "IHDR":
+      self._width, self._height = struct.unpack(">II", self._image_data[16:24])
+    else:
+      raise BadImageError("Corrupt PNG format")
+
+  def _update_jpeg_dimensions(self):
+    """Updates the width and height fields of the jpeg image.
+
+    Raises:
+      BadImageError if the image string is not a valid jpeg image.
+    """
+    size = len(self._image_data)
+    offset = 2
+    while offset < size:
+      while offset < size and ord(self._image_data[offset]) != 0xFF:
+        offset += 1
+      while offset < size and ord(self._image_data[offset]) == 0xFF:
+        offset += 1
+      if (offset < size and ord(self._image_data[offset]) & 0xF0 == 0xC0 and
+          ord(self._image_data[offset]) != 0xC4):
+        offset += 4
+        if offset + 4 <= size:
+          self._height, self._width = struct.unpack(
+              ">HH",
+              self._image_data[offset:offset + 4])
+          break
+        else:
+          raise BadImageError("Corrupt JPEG format")
+      elif offset + 2 <= size:
+        offset += 1
+        offset += struct.unpack(">H", self._image_data[offset:offset + 2])[0]
+      else:
+        raise BadImageError("Corrupt JPEG format")
+    if self._height is None or self._width is None:
+      raise BadImageError("Corrupt JPEG format")
+
+  def _update_tiff_dimensions(self):
+    """Updates the width and height fields of the tiff image.
+
+    Raises:
+      BadImageError if the image string is not a valid tiff image.
+    """
+    size = len(self._image_data)
+    if self._image_data.startswith("II"):
+      endianness = "<"
+    else:
+      endianness = ">"
+    ifd_offset = struct.unpack(endianness + "I", self._image_data[4:8])[0]
+    if ifd_offset + 14 <= size:
+      ifd_size = struct.unpack(
+          endianness + "H",
+          self._image_data[ifd_offset:ifd_offset + 2])[0]
+      ifd_offset += 2
+      for unused_i in range(0, ifd_size):
+        if ifd_offset + 12 <= size:
+          tag = struct.unpack(
+              endianness + "H",
+              self._image_data[ifd_offset:ifd_offset + 2])[0]
+          if tag == 0x100 or tag == 0x101:
+            value_type = struct.unpack(
+                endianness + "H",
+                self._image_data[ifd_offset + 2:ifd_offset + 4])[0]
+            if value_type == 3:
+              format = endianness + "H"
+              end_offset = ifd_offset + 10
+            elif value_type == 4:
+              format = endianness + "I"
+              end_offset = ifd_offset + 12
+            else:
+              format = endianness + "B"
+              end_offset = ifd_offset + 9
+            if tag == 0x100:
+              self._width = struct.unpack(
+                  format,
+                  self._image_data[ifd_offset + 8:end_offset])[0]
+              if self._height is not None:
+                break
+            else:
+              self._height = struct.unpack(
+                  format,
+                  self._image_data[ifd_offset + 8:end_offset])[0]
+              if self._width is not None:
+                break
+          ifd_offset += 12
+        else:
+          raise BadImageError("Corrupt TIFF format")
+    if self._width is None or self._height is None:
+      raise BadImageError("Corrupt TIFF format")
+
+  def _update_bmp_dimensions(self):
+    """Updates the width and height fields of the bmp image.
+
+    Raises:
+      BadImageError if the image string is not a valid bmp image.
+    """
+    size = len(self._image_data)
+    if size >= 18:
+      header_length = struct.unpack("<I", self._image_data[14:18])[0]
+      if ((header_length == 40 or header_length == 108 or
+           header_length == 124 or header_length == 64) and size >= 26):
+        self._width, self._height = struct.unpack("<II",
+                                                  self._image_data[18:26])
+      elif header_length == 12 and size >= 22:
+        self._width, self._height = struct.unpack("<HH",
+                                                  self._image_data[18:22])
+      else:
+        raise BadImageError("Corrupt BMP format")
+    else:
+      raise BadImageError("Corrupt BMP format")
+
+  def _update_ico_dimensions(self):
+    """Updates the width and height fields of the ico image.
+
+    Raises:
+      BadImageError if the image string is not a valid ico image.
+    """
+    size = len(self._image_data)
+    if size >= 8:
+      self._width, self._height = struct.unpack("<BB", self._image_data[6:8])
+      if not self._width:
+        self._width = 256
+      if not self._height:
+        self._height = 256
+    else:
+      raise BadImageError("Corrupt ICO format")
+
   def resize(self, width=0, height=0):
     """Resize the image maintaining the aspect ratio.
 
@@ -332,8 +505,24 @@
     self._image_data = response.image().content()
     self._transforms = []
     self._transform_map.clear()
+    self._width = None
+    self._height = None
     return self._image_data
 
+  @property
+  def width(self):
+    """Gets the width of the image."""
+    if self._width is None:
+      self._update_dimensions()
+    return self._width
+
+  @property
+  def height(self):
+    """Gets the height of the image."""
+    if self._height is None:
+      self._update_dimensions()
+    return self._height
+
 
 def resize(image_data, width=0, height=0, output_encoding=PNG):
   """Resize a given image file maintaining the aspect ratio.
--- a/thirdparty/google_appengine/google/appengine/api/images/images_service_pb.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/images/images_service_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -54,12 +54,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -130,12 +124,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -355,12 +343,6 @@
     if self.has_autolevels_ and self.autolevels_ != x.autolevels_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -555,12 +537,6 @@
     if self.has_content_ and self.content_ != x.content_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_content_):
@@ -653,12 +629,6 @@
     if self.has_mime_type_ and self.mime_type_ != x.mime_type_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -766,12 +736,6 @@
     if self.has_output_ and self.output_ != x.output_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_image_):
@@ -907,12 +871,6 @@
     if self.has_image_ and self.image_ != x.image_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_image_):
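
This file (and the other generated *_pb.py modules below) loses its
hand-written __eq__/__ne__ overrides, so == between two distinct messages now
falls back to Python's default identity comparison; structural comparison is
still available via Equals(). A sketch, with the Transform message and its
set_width accessor assumed from this SDK's generated API:

    from google.appengine.api.images import images_service_pb

    a = images_service_pb.Transform()
    b = images_service_pb.Transform()
    a.set_width(100)
    b.set_width(100)

    print a == b         # False: identity comparison after this change
    print a.Equals(b)    # 1: field-by-field comparison still works
    print a == a         # True: same object
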
--- a/thirdparty/google_appengine/google/appengine/api/mail_service_pb.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/mail_service_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -55,12 +55,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -144,12 +138,6 @@
     if self.has_data_ and self.data_ != x.data_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_filename_):
@@ -395,12 +383,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_sender_):
--- a/thirdparty/google_appengine/google/appengine/api/memcache/memcache_service_pb.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/memcache/memcache_service_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -49,12 +49,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -124,12 +118,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -247,12 +235,6 @@
     if self.has_flags_ and self.flags_ != x.flags_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_key_):
@@ -344,12 +326,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.item_:
@@ -518,12 +494,6 @@
     if self.has_expiration_time_ and self.expiration_time_ != x.expiration_time_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_key_):
@@ -647,12 +617,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.item_:
@@ -778,12 +742,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -884,12 +842,6 @@
     if self.has_delete_time_ and self.delete_time_ != x.delete_time_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_key_):
@@ -969,12 +921,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.item_:
@@ -1086,12 +1032,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -1221,12 +1161,6 @@
     if self.has_direction_ and self.direction_ != x.direction_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_key_):
@@ -1333,12 +1267,6 @@
     if self.has_new_value_ and self.new_value_ != x.new_value_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -1400,12 +1328,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -1456,12 +1378,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -1512,12 +1428,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -1669,12 +1579,6 @@
     if self.has_oldest_item_age_ and self.oldest_item_age_ != x.oldest_item_age_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_hits_):
@@ -1840,12 +1744,6 @@
     if self.has_stats_ and self.stats_ != x.stats_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (self.has_stats_ and not self.stats_.IsInitialized(debug_strs)): initialized = 0
--- a/thirdparty/google_appengine/google/appengine/api/urlfetch_service_pb.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/urlfetch_service_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -57,12 +57,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -146,12 +140,6 @@
     if self.has_value_ and self.value_ != x.value_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_key_):
@@ -320,12 +308,6 @@
     if self.has_followredirects_ and self.followredirects_ != x.followredirects_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_method_):
@@ -497,12 +479,6 @@
     if self.has_value_ and self.value_ != x.value_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_key_):
@@ -636,12 +612,6 @@
     if self.has_contentwastruncated_ and self.contentwastruncated_ != x.contentwastruncated_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_statuscode_):
--- a/thirdparty/google_appengine/google/appengine/api/urlfetch_stub.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/urlfetch_stub.py	Tue Jan 20 13:19:45 2009 +0000
@@ -51,6 +51,7 @@
 
 
 _UNTRUSTED_REQUEST_HEADERS = frozenset([
+  'accept-encoding',
   'content-length',
   'host',
   'referer',
@@ -80,7 +81,7 @@
     """
     (protocol, host, path, parameters, query, fragment) = urlparse.urlparse(request.url())
 
-    payload = ''
+    payload = None
     if request.method() == urlfetch_service_pb.URLFetchRequest.GET:
       method = 'GET'
     elif request.method() == urlfetch_service_pb.URLFetchRequest.POST:
@@ -118,7 +119,7 @@
 
     Args:
       url: String containing the URL to access.
-      payload: Request payload to send, if any.
+      payload: Request payload to send, if any; None if no payload.
       method: HTTP method to use (e.g., 'GET')
       headers: List of additional header objects to use for the request.
       response: Response object
@@ -150,10 +151,11 @@
         protocol = last_protocol
 
       adjusted_headers = {
-        'Content-Length': len(payload),
         'Host': host,
         'Accept': '*/*',
       }
+      if payload is not None:
+        adjusted_headers['Content-Length'] = len(payload)
       if method == 'POST' and payload:
         adjusted_headers['Content-Type'] = 'application/x-www-form-urlencoded'
 
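With this change the stub no longer fabricates an empty payload, and
Content-Length is emitted only when a payload actually exists (accept-encoding
is also now stripped from untrusted request headers). A minimal standalone
sketch of the adjusted header logic:

    def build_headers(host, method, payload):
        # Mirror of the adjusted_headers logic above.
        headers = {'Host': host, 'Accept': '*/*'}
        if payload is not None:
            headers['Content-Length'] = len(payload)
        if method == 'POST' and payload:
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
        return headers

    print build_headers('example.com', 'GET', None)    # no Content-Length
    print build_headers('example.com', 'POST', 'a=1')  # length + form type
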
--- a/thirdparty/google_appengine/google/appengine/api/user_service_pb.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/user_service_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -49,12 +49,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
--- a/thirdparty/google_appengine/google/appengine/api/validation.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/validation.py	Tue Jan 20 13:19:45 2009 +0000
@@ -224,16 +224,6 @@
       raise ValidationError('Class \'%s\' does not have attribute \'%s\''
                             % (self.__class__, key))
 
-  def __eq__(self, other):
-    """Comparison operator."""
-    if isinstance(other, type(self)):
-      for attribute in self.ATTRIBUTES:
-        if getattr(self, attribute) != getattr(other, attribute):
-          return False
-      return True
-    else:
-      return False
-
   def __str__(self):
     """Formatted view of validated object and nested values."""
     return repr(self)
@@ -678,6 +668,113 @@
     return cast_value
 
 
+class _RegexStrValue(object):
+  """Simulates the regex object to support recomplation when necessary.
+
+  Used by the RegexStr class to dynamically build and recompile regular
+  expression attributes of a validated object.  This object replaces the normal
+  object returned from re.compile which is immutable.
+
+  When the value of this object is a string, that string is simply used as the
+  regular expression when recompilation is needed.  If the state of this object
+  is a list of strings, the strings are joined in to a single 'or' expression.
+  """
+
+  def __init__(self, attribute, value):
+    """Initialize recompilable regex value.
+
+    Args:
+      attribute: Attribute validator associated with this regex value.
+      value: Initial underlying python value for regex string.  Either a single
+        regex string or a list of regex strings.
+    """
+    self.__attribute = attribute
+    self.__value = value
+    self.__regex = None
+
+  def __AsString(self, value):
+    """Convert a value to appropriate string.
+
+    Returns:
+      String version of value with all carriage returns and line feeds removed.
+    """
+    if issubclass(self.__attribute.expected_type, str):
+      cast_value = TYPE_STR(value)
+    else:
+      cast_value = TYPE_UNICODE(value)
+
+    cast_value = cast_value.replace('\n', '')
+    cast_value = cast_value.replace('\r', '')
+    return cast_value
+
+  def __BuildRegex(self):
+    """Build regex string from state.
+
+    Returns:
+      String version of the regular expression.  A list value is constructed
+      as a larger regular expression in which each regex in the list is
+      joined with all the others in a single 'or' expression.
+    """
+    if isinstance(self.__value, list):
+      value_list = self.__value
+      sequence = True
+    else:
+      value_list = [self.__value]
+      sequence = False
+
+    regex_list = []
+    for item in value_list:
+      regex_list.append(self.__AsString(item))
+
+    if sequence:
+      return '|'.join('(?:%s)' % item for item in regex_list)
+    else:
+      return regex_list[0]
+
+  def __Compile(self):
+    """Build regular expression object from state.
+
+    Returns:
+      Compiled regular expression based on internal value.
+
+    Raises:
+      ValidationError if the regular expression cannot be compiled.
+    """
+    regex = self.__BuildRegex()
+    try:
+      return re.compile(regex)
+    except re.error, e:
+      raise ValidationError('Value \'%s\' does not compile: %s' % (regex, e), e)
+
+  @property
+  def regex(self):
+    """Compiled regular expression as described by underlying value."""
+    return self.__Compile()
+
+  def match(self, value):
+    """Match against internal regular expression.
+
+    Returns:
+      Regular expression object built from underlying value.
+    """
+    return re.match(self.__BuildRegex(), value)
+
+  def Validate(self):
+    """Ensure that regex string compiles."""
+    self.__Compile()
+
+  def __str__(self):
+    """Regular expression string as described by underlying value."""
+    return self.__BuildRegex()
+
+  def __eq__(self, other):
+    """Comparison against other regular expression string values."""
+    if isinstance(other, _RegexStrValue):
+      return self.__BuildRegex() == other.__BuildRegex()
+    return str(self) == other
+
+  def __ne__(self, other):
+    """Inequality operator for regular expression string value."""
+    return not self.__eq__(other)
+
+
 class RegexStr(Validator):
   """Validates that a string can compile as a regex without errors.
 
@@ -693,7 +790,8 @@
       AttributeDefinitionError if string_type is not a kind of string.
     """
     if default is not None:
-      default = re.compile(default)
+      default = _RegexStrValue(self, default)
+      re.compile(str(default))
     super(RegexStr, self).__init__(default)
     if (not issubclass(string_type, basestring) or
         string_type is basestring):
@@ -715,22 +813,15 @@
       ValueError when value does not compile as a regular expression.  TypeError
       when value does not match provided string type.
     """
-    if issubclass(self.expected_type, str):
-      cast_value = TYPE_STR(value)
-    else:
-      cast_value = TYPE_UNICODE(value)
-
-    cast_value = cast_value.replace('\n', '')
-    cast_value = cast_value.replace('\r', '')
-    try:
-      compiled = re.compile(cast_value)
-    except re.error, e:
-      raise ValidationError('Value \'%s\' does not compile: %s' % (value, e), e)
-    return compiled
+    if isinstance(value, _RegexStrValue):
+      return value
+    value = _RegexStrValue(self, value)
+    value.Validate()
+    return value
 
   def ToValue(self, value):
     """Returns the RE pattern for this validator."""
-    return value.pattern
+    return str(value)
 
 
 class Range(Validator):
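
A usage sketch for the recompilable regex value, assuming the SDK is
importable; _RegexStrValue wants an attribute object exposing expected_type,
so a tiny stand-in class (hypothetical, for the demo only) is used here:

    from google.appengine.api import validation

    class _Attr(object):          # hypothetical stand-in attribute validator
        expected_type = str

    single = validation._RegexStrValue(_Attr(), 'ab+c')
    print str(single)                   # ab+c
    print bool(single.match('abbbc'))   # True

    multi = validation._RegexStrValue(_Attr(), ['cat', 'dog'])
    print str(multi)                    # (?:cat)|(?:dog)
    print bool(multi.match('dog'))      # True
    print single == 'ab+c'              # True: compares as a plain pattern
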
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/base/capabilities_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,444 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.net.proto import ProtocolBuffer
+import array
+import dummy_thread as thread
+
+__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
+                   unusednames=printElemNumber,debug_strs no-special"""
+
+class CapabilityConfigList(ProtocolBuffer.ProtocolMessage):
+  has_default_config_ = 0
+  default_config_ = None
+
+  def __init__(self, contents=None):
+    self.config_ = []
+    self.lazy_init_lock_ = thread.allocate_lock()
+    if contents is not None: self.MergeFromString(contents)
+
+  def config_size(self): return len(self.config_)
+  def config_list(self): return self.config_
+
+  def config(self, i):
+    return self.config_[i]
+
+  def mutable_config(self, i):
+    return self.config_[i]
+
+  def add_config(self):
+    x = CapabilityConfig()
+    self.config_.append(x)
+    return x
+
+  def clear_config(self):
+    self.config_ = []
+  def default_config(self):
+    if self.default_config_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.default_config_ is None: self.default_config_ = CapabilityConfig()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.default_config_
+
+  def mutable_default_config(self): self.has_default_config_ = 1; return self.default_config()
+
+  def clear_default_config(self):
+    self.has_default_config_ = 0;
+    if self.default_config_ is not None: self.default_config_.Clear()
+
+  def has_default_config(self): return self.has_default_config_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.config_size()): self.add_config().CopyFrom(x.config(i))
+    if (x.has_default_config()): self.mutable_default_config().MergeFrom(x.default_config())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.config_) != len(x.config_): return 0
+    for e1, e2 in zip(self.config_, x.config_):
+      if e1 != e2: return 0
+    if self.has_default_config_ != x.has_default_config_: return 0
+    if self.has_default_config_ and self.default_config_ != x.default_config_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    for p in self.config_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    if (self.has_default_config_ and not self.default_config_.IsInitialized(debug_strs)): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 1 * len(self.config_)
+    for i in xrange(len(self.config_)): n += self.lengthString(self.config_[i].ByteSize())
+    if (self.has_default_config_): n += 1 + self.lengthString(self.default_config_.ByteSize())
+    return n + 0
+
+  def Clear(self):
+    self.clear_config()
+    self.clear_default_config()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.config_)):
+      out.putVarInt32(10)
+      out.putVarInt32(self.config_[i].ByteSize())
+      self.config_[i].OutputUnchecked(out)
+    if (self.has_default_config_):
+      out.putVarInt32(18)
+      out.putVarInt32(self.default_config_.ByteSize())
+      self.default_config_.OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.add_config().TryMerge(tmp)
+        continue
+      if tt == 18:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_default_config().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.config_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("config%s <\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+      cnt+=1
+    if self.has_default_config_:
+      res+=prefix+"default_config <\n"
+      res+=self.default_config_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+  kconfig = 1
+  kdefault_config = 2
+
+  _TEXT = (
+   "ErrorCode",
+   "config",
+   "default_config",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class CapabilityConfig(ProtocolBuffer.ProtocolMessage):
+
+  ENABLED      =    1
+  SCHEDULED    =    2
+  DISABLED     =    3
+  UNKNOWN      =    4
+
+  _Status_NAMES = {
+    1: "ENABLED",
+    2: "SCHEDULED",
+    3: "DISABLED",
+    4: "UNKNOWN",
+  }
+
+  def Status_Name(cls, x): return cls._Status_NAMES.get(x, "")
+  Status_Name = classmethod(Status_Name)
+
+  has_package_ = 0
+  package_ = ""
+  has_capability_ = 0
+  capability_ = ""
+  has_status_ = 0
+  status_ = 4
+  has_scheduled_time_ = 0
+  scheduled_time_ = ""
+  has_internal_message_ = 0
+  internal_message_ = ""
+  has_admin_message_ = 0
+  admin_message_ = ""
+  has_error_message_ = 0
+  error_message_ = ""
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def package(self): return self.package_
+
+  def set_package(self, x):
+    self.has_package_ = 1
+    self.package_ = x
+
+  def clear_package(self):
+    self.has_package_ = 0
+    self.package_ = ""
+
+  def has_package(self): return self.has_package_
+
+  def capability(self): return self.capability_
+
+  def set_capability(self, x):
+    self.has_capability_ = 1
+    self.capability_ = x
+
+  def clear_capability(self):
+    self.has_capability_ = 0
+    self.capability_ = ""
+
+  def has_capability(self): return self.has_capability_
+
+  def status(self): return self.status_
+
+  def set_status(self, x):
+    self.has_status_ = 1
+    self.status_ = x
+
+  def clear_status(self):
+    self.has_status_ = 0
+    self.status_ = 4
+
+  def has_status(self): return self.has_status_
+
+  def scheduled_time(self): return self.scheduled_time_
+
+  def set_scheduled_time(self, x):
+    self.has_scheduled_time_ = 1
+    self.scheduled_time_ = x
+
+  def clear_scheduled_time(self):
+    self.has_scheduled_time_ = 0
+    self.scheduled_time_ = ""
+
+  def has_scheduled_time(self): return self.has_scheduled_time_
+
+  def internal_message(self): return self.internal_message_
+
+  def set_internal_message(self, x):
+    self.has_internal_message_ = 1
+    self.internal_message_ = x
+
+  def clear_internal_message(self):
+    self.has_internal_message_ = 0
+    self.internal_message_ = ""
+
+  def has_internal_message(self): return self.has_internal_message_
+
+  def admin_message(self): return self.admin_message_
+
+  def set_admin_message(self, x):
+    self.has_admin_message_ = 1
+    self.admin_message_ = x
+
+  def clear_admin_message(self):
+    self.has_admin_message_ = 0
+    self.admin_message_ = ""
+
+  def has_admin_message(self): return self.has_admin_message_
+
+  def error_message(self): return self.error_message_
+
+  def set_error_message(self, x):
+    self.has_error_message_ = 1
+    self.error_message_ = x
+
+  def clear_error_message(self):
+    self.has_error_message_ = 0
+    self.error_message_ = ""
+
+  def has_error_message(self): return self.has_error_message_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_package()): self.set_package(x.package())
+    if (x.has_capability()): self.set_capability(x.capability())
+    if (x.has_status()): self.set_status(x.status())
+    if (x.has_scheduled_time()): self.set_scheduled_time(x.scheduled_time())
+    if (x.has_internal_message()): self.set_internal_message(x.internal_message())
+    if (x.has_admin_message()): self.set_admin_message(x.admin_message())
+    if (x.has_error_message()): self.set_error_message(x.error_message())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_package_ != x.has_package_: return 0
+    if self.has_package_ and self.package_ != x.package_: return 0
+    if self.has_capability_ != x.has_capability_: return 0
+    if self.has_capability_ and self.capability_ != x.capability_: return 0
+    if self.has_status_ != x.has_status_: return 0
+    if self.has_status_ and self.status_ != x.status_: return 0
+    if self.has_scheduled_time_ != x.has_scheduled_time_: return 0
+    if self.has_scheduled_time_ and self.scheduled_time_ != x.scheduled_time_: return 0
+    if self.has_internal_message_ != x.has_internal_message_: return 0
+    if self.has_internal_message_ and self.internal_message_ != x.internal_message_: return 0
+    if self.has_admin_message_ != x.has_admin_message_: return 0
+    if self.has_admin_message_ and self.admin_message_ != x.admin_message_: return 0
+    if self.has_error_message_ != x.has_error_message_: return 0
+    if self.has_error_message_ and self.error_message_ != x.error_message_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_package_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: package not set.')
+    if (not self.has_capability_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: capability not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.package_))
+    n += self.lengthString(len(self.capability_))
+    if (self.has_status_): n += 1 + self.lengthVarInt64(self.status_)
+    if (self.has_scheduled_time_): n += 1 + self.lengthString(len(self.scheduled_time_))
+    if (self.has_internal_message_): n += 1 + self.lengthString(len(self.internal_message_))
+    if (self.has_admin_message_): n += 1 + self.lengthString(len(self.admin_message_))
+    if (self.has_error_message_): n += 1 + self.lengthString(len(self.error_message_))
+    return n + 2
+
+  def Clear(self):
+    self.clear_package()
+    self.clear_capability()
+    self.clear_status()
+    self.clear_scheduled_time()
+    self.clear_internal_message()
+    self.clear_admin_message()
+    self.clear_error_message()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putPrefixedString(self.package_)
+    out.putVarInt32(18)
+    out.putPrefixedString(self.capability_)
+    if (self.has_status_):
+      out.putVarInt32(24)
+      out.putVarInt32(self.status_)
+    if (self.has_internal_message_):
+      out.putVarInt32(34)
+      out.putPrefixedString(self.internal_message_)
+    if (self.has_admin_message_):
+      out.putVarInt32(42)
+      out.putPrefixedString(self.admin_message_)
+    if (self.has_error_message_):
+      out.putVarInt32(50)
+      out.putPrefixedString(self.error_message_)
+    if (self.has_scheduled_time_):
+      out.putVarInt32(58)
+      out.putPrefixedString(self.scheduled_time_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_package(d.getPrefixedString())
+        continue
+      if tt == 18:
+        self.set_capability(d.getPrefixedString())
+        continue
+      if tt == 24:
+        self.set_status(d.getVarInt32())
+        continue
+      if tt == 34:
+        self.set_internal_message(d.getPrefixedString())
+        continue
+      if tt == 42:
+        self.set_admin_message(d.getPrefixedString())
+        continue
+      if tt == 50:
+        self.set_error_message(d.getPrefixedString())
+        continue
+      if tt == 58:
+        self.set_scheduled_time(d.getPrefixedString())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_package_: res+=prefix+("package: %s\n" % self.DebugFormatString(self.package_))
+    if self.has_capability_: res+=prefix+("capability: %s\n" % self.DebugFormatString(self.capability_))
+    if self.has_status_: res+=prefix+("status: %s\n" % self.DebugFormatInt32(self.status_))
+    if self.has_scheduled_time_: res+=prefix+("scheduled_time: %s\n" % self.DebugFormatString(self.scheduled_time_))
+    if self.has_internal_message_: res+=prefix+("internal_message: %s\n" % self.DebugFormatString(self.internal_message_))
+    if self.has_admin_message_: res+=prefix+("admin_message: %s\n" % self.DebugFormatString(self.admin_message_))
+    if self.has_error_message_: res+=prefix+("error_message: %s\n" % self.DebugFormatString(self.error_message_))
+    return res
+
+  kpackage = 1
+  kcapability = 2
+  kstatus = 3
+  kscheduled_time = 7
+  kinternal_message = 4
+  kadmin_message = 5
+  kerror_message = 6
+
+  _TEXT = (
+   "ErrorCode",
+   "package",
+   "capability",
+   "status",
+   "internal_message",
+   "admin_message",
+   "error_message",
+   "scheduled_time",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+
+__all__ = ['CapabilityConfigList','CapabilityConfig']
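
The new capabilities_pb module is ordinary generated ProtocolBuffer code. A
round-trip sketch using only the accessors defined above (Encode and the
contents-constructor come from the ProtocolMessage base class):

    from google.appengine.base import capabilities_pb

    config = capabilities_pb.CapabilityConfig()
    config.set_package('datastore_v3')      # required field
    config.set_capability('write')          # required field
    config.set_status(capabilities_pb.CapabilityConfig.ENABLED)
    assert config.IsInitialized()

    # Encode, then decode through the wire format and compare structurally.
    decoded = capabilities_pb.CapabilityConfig(config.Encode())
    assert decoded.Equals(config)
    print decoded.package(), decoded.capability(), \
        capabilities_pb.CapabilityConfig.Status_Name(decoded.status())
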
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/cron/GrocLexer.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,1639 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import sys
+from antlr3 import *
+from antlr3.compat import set, frozenset
+
+
+HIDDEN = BaseRecognizer.HIDDEN
+
+THIRD=12
+SEPTEMBER=34
+FOURTH=13
+SECOND=11
+WEDNESDAY=20
+NOVEMBER=36
+SATURDAY=23
+JULY=32
+APRIL=29
+DIGITS=8
+OCTOBER=35
+MAY=30
+EVERY=6
+FEBRUARY=27
+MONDAY=18
+SUNDAY=24
+JUNE=31
+OF=4
+MARCH=28
+EOF=-1
+JANUARY=26
+MONTH=25
+FRIDAY=22
+MINUTES=17
+FIFTH=14
+TIME=5
+WS=39
+QUARTER=38
+THURSDAY=21
+COMMA=9
+DECEMBER=37
+AUGUST=33
+DIGIT=7
+TUESDAY=19
+HOURS=16
+FOURTH_OR_FIFTH=15
+FIRST=10
+
+
+class GrocLexer(Lexer):
+
+    grammarFileName = "Groc.g"
+    antlr_version = version_str_to_tuple("3.1.1")
+    antlr_version_str = "3.1.1"
+
+    def __init__(self, input=None, state=None):
+        if state is None:
+            state = RecognizerSharedState()
+        Lexer.__init__(self, input, state)
+
+        self.dfa25 = self.DFA25(
+            self, 25,
+            eot = self.DFA25_eot,
+            eof = self.DFA25_eof,
+            min = self.DFA25_min,
+            max = self.DFA25_max,
+            accept = self.DFA25_accept,
+            special = self.DFA25_special,
+            transition = self.DFA25_transition
+            )
+
+
+
+
+
+
+    def mTIME(self, ):
+
+        try:
+            _type = TIME
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            alt1 = 4
+            LA1 = self.input.LA(1)
+            if LA1 == 48:
+                LA1_1 = self.input.LA(2)
+
+                if ((48 <= LA1_1 <= 57)) :
+                    alt1 = 2
+                elif (LA1_1 == 58) :
+                    alt1 = 1
+                else:
+                    nvae = NoViableAltException("", 1, 1, self.input)
+
+                    raise nvae
+
+            elif LA1 == 49:
+                LA1_2 = self.input.LA(2)
+
+                if ((48 <= LA1_2 <= 57)) :
+                    alt1 = 3
+                elif (LA1_2 == 58) :
+                    alt1 = 1
+                else:
+                    nvae = NoViableAltException("", 1, 2, self.input)
+
+                    raise nvae
+
+            elif LA1 == 50:
+                LA1_3 = self.input.LA(2)
+
+                if ((48 <= LA1_3 <= 52)) :
+                    alt1 = 4
+                elif (LA1_3 == 58) :
+                    alt1 = 1
+                else:
+                    nvae = NoViableAltException("", 1, 3, self.input)
+
+                    raise nvae
+
+            elif LA1 == 51 or LA1 == 52 or LA1 == 53 or LA1 == 54 or LA1 == 55 or LA1 == 56 or LA1 == 57:
+                alt1 = 1
+            else:
+                nvae = NoViableAltException("", 1, 0, self.input)
+
+                raise nvae
+
+            if alt1 == 1:
+                pass
+                self.mDIGIT()
+
+
+            elif alt1 == 2:
+                pass
+                pass
+                self.match(48)
+                self.mDIGIT()
+
+
+
+
+
+            elif alt1 == 3:
+                pass
+                pass
+                self.match(49)
+                self.mDIGIT()
+
+
+
+
+
+            elif alt1 == 4:
+                pass
+                pass
+                self.match(50)
+                self.matchRange(48, 52)
+
+
+
+
+
+
+            self.match(58)
+            pass
+            self.matchRange(48, 53)
+            self.mDIGIT()
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mFIRST(self, ):
+
+        try:
+            _type = FIRST
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            alt2 = 2
+            LA2_0 = self.input.LA(1)
+
+            if (LA2_0 == 49) :
+                alt2 = 1
+            elif (LA2_0 == 102) :
+                alt2 = 2
+            else:
+                nvae = NoViableAltException("", 2, 0, self.input)
+
+                raise nvae
+
+            if alt2 == 1:
+                pass
+                self.match("1st")
+
+
+            elif alt2 == 2:
+                pass
+                self.match("first")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mSECOND(self, ):
+
+        try:
+            _type = SECOND
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            alt3 = 2
+            LA3_0 = self.input.LA(1)
+
+            if (LA3_0 == 50) :
+                alt3 = 1
+            elif (LA3_0 == 115) :
+                alt3 = 2
+            else:
+                nvae = NoViableAltException("", 3, 0, self.input)
+
+                raise nvae
+
+            if alt3 == 1:
+                pass
+                self.match("2nd")
+
+
+            elif alt3 == 2:
+                pass
+                self.match("second")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mTHIRD(self, ):
+
+        try:
+            _type = THIRD
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            alt4 = 2
+            LA4_0 = self.input.LA(1)
+
+            if (LA4_0 == 51) :
+                alt4 = 1
+            elif (LA4_0 == 116) :
+                alt4 = 2
+            else:
+                nvae = NoViableAltException("", 4, 0, self.input)
+
+                raise nvae
+
+            if alt4 == 1:
+                pass
+                self.match("3rd")
+
+
+            elif alt4 == 2:
+                pass
+                self.match("third")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mFOURTH(self, ):
+
+        try:
+            _type = FOURTH
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            pass
+            self.match("4th")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mFIFTH(self, ):
+
+        try:
+            _type = FIFTH
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            pass
+            self.match("5th")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mFOURTH_OR_FIFTH(self, ):
+
+        try:
+            _type = FOURTH_OR_FIFTH
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            alt5 = 2
+            LA5_0 = self.input.LA(1)
+
+            if (LA5_0 == 102) :
+                LA5_1 = self.input.LA(2)
+
+                if (LA5_1 == 111) :
+                    alt5 = 1
+                elif (LA5_1 == 105) :
+                    alt5 = 2
+                else:
+                    nvae = NoViableAltException("", 5, 1, self.input)
+
+                    raise nvae
+
+            else:
+                nvae = NoViableAltException("", 5, 0, self.input)
+
+                raise nvae
+
+            if alt5 == 1:
+                pass
+                pass
+                self.match("fourth")
+                _type = FOURTH;
+
+
+
+
+
+            elif alt5 == 2:
+                pass
+                pass
+                self.match("fifth")
+                _type = FIFTH;
+
+
+
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mMONDAY(self, ):
+
+        try:
+            _type = MONDAY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("mon")
+            alt6 = 2
+            LA6_0 = self.input.LA(1)
+
+            if (LA6_0 == 100) :
+                alt6 = 1
+            if alt6 == 1:
+                pass
+                self.match("day")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mTUESDAY(self, ):
+
+        try:
+            _type = TUESDAY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("tue")
+            alt7 = 2
+            LA7_0 = self.input.LA(1)
+
+            if (LA7_0 == 115) :
+                alt7 = 1
+            if alt7 == 1:
+                pass
+                self.match("sday")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mWEDNESDAY(self, ):
+
+        try:
+            _type = WEDNESDAY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("wed")
+            alt8 = 2
+            LA8_0 = self.input.LA(1)
+
+            if (LA8_0 == 110) :
+                alt8 = 1
+            if alt8 == 1:
+                pass
+                self.match("nesday")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mTHURSDAY(self, ):
+
+        try:
+            _type = THURSDAY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("thu")
+            alt9 = 2
+            LA9_0 = self.input.LA(1)
+
+            if (LA9_0 == 114) :
+                alt9 = 1
+            if alt9 == 1:
+                pass
+                self.match("rsday")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mFRIDAY(self, ):
+
+        try:
+            _type = FRIDAY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("fri")
+            alt10 = 2
+            LA10_0 = self.input.LA(1)
+
+            if (LA10_0 == 100) :
+                alt10 = 1
+            if alt10 == 1:
+                pass
+                self.match("day")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mSATURDAY(self, ):
+
+        try:
+            _type = SATURDAY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("sat")
+            alt11 = 2
+            LA11_0 = self.input.LA(1)
+
+            if (LA11_0 == 117) :
+                alt11 = 1
+            if alt11 == 1:
+                pass
+                self.match("urday")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mSUNDAY(self, ):
+
+        try:
+            _type = SUNDAY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("sun")
+            alt12 = 2
+            LA12_0 = self.input.LA(1)
+
+            if (LA12_0 == 100) :
+                alt12 = 1
+            if alt12 == 1:
+                pass
+                self.match("day")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mJANUARY(self, ):
+
+        try:
+            _type = JANUARY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("jan")
+            alt13 = 2
+            LA13_0 = self.input.LA(1)
+
+            if (LA13_0 == 117) :
+                alt13 = 1
+            if alt13 == 1:
+                pass
+                self.match("uary")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mFEBRUARY(self, ):
+
+        try:
+            _type = FEBRUARY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("feb")
+            alt14 = 2
+            LA14_0 = self.input.LA(1)
+
+            if (LA14_0 == 114) :
+                alt14 = 1
+            if alt14 == 1:
+                pass
+                self.match("ruary")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mMARCH(self, ):
+
+        try:
+            _type = MARCH
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("mar")
+            alt15 = 2
+            LA15_0 = self.input.LA(1)
+
+            if (LA15_0 == 99) :
+                alt15 = 1
+            if alt15 == 1:
+                pass
+                self.match("ch")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mAPRIL(self, ):
+
+        try:
+            _type = APRIL
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("apr")
+            alt16 = 2
+            LA16_0 = self.input.LA(1)
+
+            if (LA16_0 == 105) :
+                alt16 = 1
+            if alt16 == 1:
+                pass
+                self.match("il")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mMAY(self, ):
+
+        try:
+            _type = MAY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("may")
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mJUNE(self, ):
+
+        try:
+            _type = JUNE
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("jun")
+            alt17 = 2
+            LA17_0 = self.input.LA(1)
+
+            if (LA17_0 == 101) :
+                alt17 = 1
+            if alt17 == 1:
+                pass
+                self.match(101)
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mJULY(self, ):
+
+        try:
+            _type = JULY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("jul")
+            alt18 = 2
+            LA18_0 = self.input.LA(1)
+
+            if (LA18_0 == 121) :
+                alt18 = 1
+            if alt18 == 1:
+                pass
+                self.match(121)
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mAUGUST(self, ):
+
+        try:
+            _type = AUGUST
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("aug")
+            alt19 = 2
+            LA19_0 = self.input.LA(1)
+
+            if (LA19_0 == 117) :
+                alt19 = 1
+            if alt19 == 1:
+                pass
+                self.match("ust")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mSEPTEMBER(self, ):
+
+        try:
+            _type = SEPTEMBER
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("sep")
+            alt20 = 2
+            LA20_0 = self.input.LA(1)
+
+            if (LA20_0 == 116) :
+                alt20 = 1
+            if alt20 == 1:
+                pass
+                self.match("tember")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mOCTOBER(self, ):
+
+        try:
+            _type = OCTOBER
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("oct")
+            alt21 = 2
+            LA21_0 = self.input.LA(1)
+
+            if (LA21_0 == 111) :
+                alt21 = 1
+            if alt21 == 1:
+                pass
+                self.match("ober")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mNOVEMBER(self, ):
+
+        try:
+            _type = NOVEMBER
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("nov")
+            alt22 = 2
+            LA22_0 = self.input.LA(1)
+
+            if (LA22_0 == 101) :
+                alt22 = 1
+            if alt22 == 1:
+                pass
+                self.match("ember")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mDECEMBER(self, ):
+
+        try:
+            _type = DECEMBER
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            self.match("dec")
+            alt23 = 2
+            LA23_0 = self.input.LA(1)
+
+            if (LA23_0 == 101) :
+                alt23 = 1
+            if alt23 == 1:
+                pass
+                self.match("ember")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mMONTH(self, ):
+
+        try:
+            _type = MONTH
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            pass
+            self.match("month")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mQUARTER(self, ):
+
+        try:
+            _type = QUARTER
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            pass
+            self.match("quarter")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mEVERY(self, ):
+
+        try:
+            _type = EVERY
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            pass
+            self.match("every")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mHOURS(self, ):
+
+        try:
+            _type = HOURS
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            pass
+            self.match("hours")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mMINUTES(self, ):
+
+        try:
+            _type = MINUTES
+            _channel = DEFAULT_CHANNEL
+
+            pass
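+            # Four characters of lookahead ('m', 'i', 'n', then 's' at 115
+            # or 'u' at 117) select between the "mins" and "minutes"
+            # spellings.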
+            alt24 = 2
+            LA24_0 = self.input.LA(1)
+
+            if (LA24_0 == 109) :
+                LA24_1 = self.input.LA(2)
+
+                if (LA24_1 == 105) :
+                    LA24_2 = self.input.LA(3)
+
+                    if (LA24_2 == 110) :
+                        LA24_3 = self.input.LA(4)
+
+                        if (LA24_3 == 115) :
+                            alt24 = 1
+                        elif (LA24_3 == 117) :
+                            alt24 = 2
+                        else:
+                            nvae = NoViableAltException("", 24, 3, self.input)
+
+                            raise nvae
+
+                    else:
+                        nvae = NoViableAltException("", 24, 2, self.input)
+
+                        raise nvae
+
+                else:
+                    nvae = NoViableAltException("", 24, 1, self.input)
+
+                    raise nvae
+
+            else:
+                nvae = NoViableAltException("", 24, 0, self.input)
+
+                raise nvae
+
+            if alt24 == 1:
+                pass
+                self.match("mins")
+
+
+            elif alt24 == 2:
+                pass
+                self.match("minutes")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mCOMMA(self, ):
+
+        try:
+            _type = COMMA
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            pass
+            self.match(44)
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mOF(self, ):
+
+        try:
+            _type = OF
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            pass
+            self.match("of")
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mWS(self, ):
+
+        try:
+            _type = WS
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            if (9 <= self.input.LA(1) <= 10) or self.input.LA(1) == 13 or self.input.LA(1) == 32:
+                self.input.consume()
+            else:
+                mse = MismatchedSetException(None, self.input)
+                self.recover(mse)
+                raise mse
+
+            _channel=HIDDEN;
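+            # Tab (9), newline (10), carriage return (13) and space (32)
+            # are routed to the hidden channel, so the parser never sees
+            # whitespace.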
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mDIGIT(self, ):
+
+        try:
+            _type = DIGIT
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            pass
+            self.matchRange(48, 57)
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mDIGITS(self, ):
+
+        try:
+            _type = DIGITS
+            _channel = DEFAULT_CHANNEL
+
+            pass
+            pass
+            self.mDIGIT()
+            self.mDIGIT()
+
+
+
+
+
+
+            self._state.type = _type
+            self._state.channel = _channel
+
+        finally:
+
+            pass
+
+
+
+
+    def mTokens(self):
+        alt25 = 36
+        alt25 = self.dfa25.predict(self.input)
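+        # dfa25 predicts which of the 36 token alternatives starts at the
+        # current input position; each branch delegates to its mXXX rule.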
+        if alt25 == 1:
+            pass
+            self.mTIME()
+
+
+        elif alt25 == 2:
+            pass
+            self.mFIRST()
+
+
+        elif alt25 == 3:
+            pass
+            self.mSECOND()
+
+
+        elif alt25 == 4:
+            pass
+            self.mTHIRD()
+
+
+        elif alt25 == 5:
+            pass
+            self.mFOURTH()
+
+
+        elif alt25 == 6:
+            pass
+            self.mFIFTH()
+
+
+        elif alt25 == 7:
+            pass
+            self.mFOURTH_OR_FIFTH()
+
+
+        elif alt25 == 8:
+            pass
+            self.mMONDAY()
+
+
+        elif alt25 == 9:
+            pass
+            self.mTUESDAY()
+
+
+        elif alt25 == 10:
+            pass
+            self.mWEDNESDAY()
+
+
+        elif alt25 == 11:
+            pass
+            self.mTHURSDAY()
+
+
+        elif alt25 == 12:
+            pass
+            self.mFRIDAY()
+
+
+        elif alt25 == 13:
+            pass
+            self.mSATURDAY()
+
+
+        elif alt25 == 14:
+            pass
+            self.mSUNDAY()
+
+
+        elif alt25 == 15:
+            pass
+            self.mJANUARY()
+
+
+        elif alt25 == 16:
+            pass
+            self.mFEBRUARY()
+
+
+        elif alt25 == 17:
+            pass
+            self.mMARCH()
+
+
+        elif alt25 == 18:
+            pass
+            self.mAPRIL()
+
+
+        elif alt25 == 19:
+            pass
+            self.mMAY()
+
+
+        elif alt25 == 20:
+            pass
+            self.mJUNE()
+
+
+        elif alt25 == 21:
+            pass
+            self.mJULY()
+
+
+        elif alt25 == 22:
+            pass
+            self.mAUGUST()
+
+
+        elif alt25 == 23:
+            pass
+            self.mSEPTEMBER()
+
+
+        elif alt25 == 24:
+            pass
+            self.mOCTOBER()
+
+
+        elif alt25 == 25:
+            pass
+            self.mNOVEMBER()
+
+
+        elif alt25 == 26:
+            pass
+            self.mDECEMBER()
+
+
+        elif alt25 == 27:
+            pass
+            self.mMONTH()
+
+
+        elif alt25 == 28:
+            pass
+            self.mQUARTER()
+
+
+        elif alt25 == 29:
+            pass
+            self.mEVERY()
+
+
+        elif alt25 == 30:
+            pass
+            self.mHOURS()
+
+
+        elif alt25 == 31:
+            pass
+            self.mMINUTES()
+
+
+        elif alt25 == 32:
+            pass
+            self.mCOMMA()
+
+
+        elif alt25 == 33:
+            pass
+            self.mOF()
+
+
+        elif alt25 == 34:
+            pass
+            self.mWS()
+
+
+        elif alt25 == 35:
+            pass
+            self.mDIGIT()
+
+
+        elif alt25 == 36:
+            pass
+            self.mDIGITS()
+
+
+
+
+
+
+
+
+    DFA25_eot = DFA.unpack(
+        u"\1\uffff\4\27\2\uffff\1\27\1\uffff\2\27\16\uffff\1\36\1\uffff\2"
+        u"\36\31\uffff\1\74\6\uffff"
+        )
+
+    DFA25_eof = DFA.unpack(
+        u"\75\uffff"
+        )
+
+    DFA25_min = DFA.unpack(
+        u"\1\11\4\60\1\145\1\141\1\60\1\150\2\60\1\141\1\uffff\1\141\1\160"
+        u"\1\143\11\uffff\1\72\1\uffff\2\72\3\uffff\1\146\3\uffff\1\143\3"
+        u"\uffff\1\151\2\uffff\1\156\1\162\2\uffff\1\154\6\uffff\1\164\6"
+        u"\uffff"
+        )
+
+    DFA25_max = DFA.unpack(
+        u"\1\167\1\72\1\163\1\156\2\162\1\165\1\164\1\165\1\164\1\72\1\157"
+        u"\1\uffff\2\165\1\146\11\uffff\1\72\1\uffff\2\72\3\uffff\1\162\3"
+        u"\uffff\1\160\3\uffff\1\165\2\uffff\1\156\1\171\2\uffff\1\156\6"
+        u"\uffff\1\164\6\uffff"
+        )
+
+    DFA25_accept = DFA.unpack(
+        u"\14\uffff\1\12\3\uffff\1\31\1\32\1\34\1\35\1\36\1\40\1\42\1\43"
+        u"\1\1\1\uffff\1\2\2\uffff\1\3\1\44\1\4\1\uffff\1\7\1\14\1\20\1\uffff"
+        u"\1\15\1\16\1\5\1\uffff\1\11\1\6\2\uffff\1\37\1\17\1\uffff\1\22"
+        u"\1\26\1\30\1\41\1\27\1\13\1\uffff\1\21\1\23\1\24\1\25\1\33\1\10"
+        )
+
+    DFA25_special = DFA.unpack(
+        u"\75\uffff"
+        )
+
+
+    DFA25_transition = [
+        DFA.unpack(u"\2\26\2\uffff\1\26\22\uffff\1\26\13\uffff\1\25\3\uffff"
+        u"\1\1\1\2\1\3\1\4\1\7\1\11\4\12\47\uffff\1\16\2\uffff\1\21\1\23"
+        u"\1\5\1\uffff\1\24\1\uffff\1\15\2\uffff\1\13\1\20\1\17\1\uffff\1"
+        u"\22\1\uffff\1\6\1\10\2\uffff\1\14"),
+        DFA.unpack(u"\12\31\1\30"),
+        DFA.unpack(u"\12\33\1\30\70\uffff\1\32"),
+        DFA.unpack(u"\5\34\5\36\1\30\63\uffff\1\35"),
+        DFA.unpack(u"\12\36\1\30\67\uffff\1\37"),
+        DFA.unpack(u"\1\43\3\uffff\1\40\5\uffff\1\41\2\uffff\1\42"),
+        DFA.unpack(u"\1\45\3\uffff\1\44\17\uffff\1\46"),
+        DFA.unpack(u"\12\36\1\30\71\uffff\1\47"),
+        DFA.unpack(u"\1\50\14\uffff\1\51"),
+        DFA.unpack(u"\12\36\1\30\71\uffff\1\52"),
+        DFA.unpack(u"\12\36\1\30"),
+        DFA.unpack(u"\1\54\7\uffff\1\55\5\uffff\1\53"),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\56\23\uffff\1\57"),
+        DFA.unpack(u"\1\60\4\uffff\1\61"),
+        DFA.unpack(u"\1\62\2\uffff\1\63"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\30"),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\30"),
+        DFA.unpack(u"\1\30"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\41\13\uffff\1\32"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\35\14\uffff\1\64"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\37\13\uffff\1\65"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\66"),
+        DFA.unpack(u"\1\67\6\uffff\1\70"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\72\1\uffff\1\71"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"\1\73"),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u""),
+        DFA.unpack(u"")
+    ]
+
+
+    DFA25 = DFA
+
+
+
+
+def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
+    from antlr3.main import LexerMain
+    main = LexerMain(GrocLexer)
+    main.stdin = stdin
+    main.stdout = stdout
+    main.stderr = stderr
+    main.execute(argv)
+
+
+if __name__ == '__main__':
+    main(sys.argv)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/cron/GrocParser.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,903 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import sys
+from antlr3 import *
+from antlr3.compat import set, frozenset
+
+
+
+
+
+allOrdinals = set([1, 2, 3, 4, 5])
+numOrdinals = len(allOrdinals)
+
+
+
+
+HIDDEN = BaseRecognizer.HIDDEN
+
+THIRD=12
+SEPTEMBER=34
+FOURTH=13
+SECOND=11
+WEDNESDAY=20
+NOVEMBER=36
+SATURDAY=23
+JULY=32
+APRIL=29
+DIGITS=8
+OCTOBER=35
+MAY=30
+EVERY=6
+FEBRUARY=27
+MONDAY=18
+SUNDAY=24
+JUNE=31
+MARCH=28
+OF=4
+EOF=-1
+JANUARY=26
+MONTH=25
+FRIDAY=22
+FIFTH=14
+MINUTES=17
+TIME=5
+WS=39
+QUARTER=38
+THURSDAY=21
+COMMA=9
+DECEMBER=37
+AUGUST=33
+DIGIT=7
+TUESDAY=19
+HOURS=16
+FIRST=10
+FOURTH_OR_FIFTH=15
+
+tokenNames = [
+    "<invalid>", "<EOR>", "<DOWN>", "<UP>",
+    "OF", "TIME", "EVERY", "DIGIT", "DIGITS", "COMMA", "FIRST", "SECOND",
+    "THIRD", "FOURTH", "FIFTH", "FOURTH_OR_FIFTH", "HOURS", "MINUTES", "MONDAY",
+    "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY",
+    "MONTH", "JANUARY", "FEBRUARY", "MARCH", "APRIL", "MAY", "JUNE", "JULY",
+    "AUGUST", "SEPTEMBER", "OCTOBER", "NOVEMBER", "DECEMBER", "QUARTER",
+    "WS"
+]
+
+
+
+
+class GrocParser(Parser):
+    grammarFileName = "Groc.g"
+    antlr_version = version_str_to_tuple("3.1.1")
+    antlr_version_str = "3.1.1"
+    tokenNames = tokenNames
+
+    def __init__(self, input, state=None):
+        if state is None:
+            state = RecognizerSharedState()
+
+        Parser.__init__(self, input, state)
+
+
+
+
+
+        self.ordinal_set = set()
+        self.weekday_set = set()
+        self.month_set = set()
+        self.time_string = '';
+        self.interval_mins = 0;
+        self.period_string = '';
+
+
+
+
+
+
+
+
+
+
+    valuesDict = {
+        SUNDAY: 0,
+        FIRST: 1,
+        MONDAY: 1,
+        JANUARY: 1,
+        TUESDAY: 2,
+        SECOND: 2,
+        FEBRUARY: 2,
+        WEDNESDAY: 3,
+        THIRD: 3,
+        MARCH: 3,
+        THURSDAY: 4,
+        FOURTH: 4,
+        APRIL: 4,
+        FRIDAY: 5,
+        FIFTH: 5,
+        MAY: 5,
+        SATURDAY: 6,
+        JUNE: 6,
+        JULY: 7,
+        AUGUST: 8,
+        SEPTEMBER: 9,
+        OCTOBER: 10,
+        NOVEMBER: 11,
+        DECEMBER: 12,
+      }
+
+    def ValueOf(self, token_type):
+      return self.valuesDict.get(token_type, -1)
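+    # For example (informal): ValueOf(TUESDAY), ValueOf(SECOND) and
+    # ValueOf(FEBRUARY) all return 2, since weekdays, ordinals and months
+    # share the one lookup table; unknown token types map to -1.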
+
+
+
+
+    def timespec(self, ):
+
+        try:
+            try:
+                pass
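+                # Lookahead: EVERY followed by a number means an interval
+                # ("every 20 mins"); EVERY followed by a weekday, or a
+                # leading ordinal, means a specific-time spec.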
+                alt1 = 2
+                LA1_0 = self.input.LA(1)
+
+                if (LA1_0 == EVERY) :
+                    LA1_1 = self.input.LA(2)
+
+                    if ((DIGIT <= LA1_1 <= DIGITS)) :
+                        alt1 = 2
+                    elif ((MONDAY <= LA1_1 <= SUNDAY)) :
+                        alt1 = 1
+                    else:
+                        nvae = NoViableAltException("", 1, 1, self.input)
+
+                        raise nvae
+
+                elif ((FIRST <= LA1_0 <= FOURTH_OR_FIFTH)) :
+                    alt1 = 1
+                else:
+                    nvae = NoViableAltException("", 1, 0, self.input)
+
+                    raise nvae
+
+                if alt1 == 1:
+                    pass
+                    self._state.following.append(self.FOLLOW_specifictime_in_timespec44)
+                    self.specifictime()
+
+                    self._state.following.pop()
+
+
+                elif alt1 == 2:
+                    pass
+                    self._state.following.append(self.FOLLOW_interval_in_timespec48)
+                    self.interval()
+
+                    self._state.following.pop()
+
+
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def specifictime(self, ):
+
+        TIME1 = None
+
+        try:
+            try:
+                pass
+                pass
+                pass
+                pass
+                pass
+                self._state.following.append(self.FOLLOW_ordinals_in_specifictime69)
+                self.ordinals()
+
+                self._state.following.pop()
+                self._state.following.append(self.FOLLOW_weekdays_in_specifictime71)
+                self.weekdays()
+
+                self._state.following.pop()
+
+
+
+
+
+
+                self.match(self.input, OF, self.FOLLOW_OF_in_specifictime75)
+                alt2 = 2
+                LA2_0 = self.input.LA(1)
+
+                if ((MONTH <= LA2_0 <= DECEMBER)) :
+                    alt2 = 1
+                elif ((FIRST <= LA2_0 <= THIRD) or LA2_0 == QUARTER) :
+                    alt2 = 2
+                else:
+                    nvae = NoViableAltException("", 2, 0, self.input)
+
+                    raise nvae
+
+                if alt2 == 1:
+                    pass
+                    self._state.following.append(self.FOLLOW_monthspec_in_specifictime78)
+                    self.monthspec()
+
+                    self._state.following.pop()
+
+
+                elif alt2 == 2:
+                    pass
+                    self._state.following.append(self.FOLLOW_quarterspec_in_specifictime80)
+                    self.quarterspec()
+
+                    self._state.following.pop()
+
+
+
+
+
+
+                TIME1=self.match(self.input, TIME, self.FOLLOW_TIME_in_specifictime93)
+                self.time_string = TIME1.text
+
+
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def interval(self, ):
+
+        intervalnum = None
+        period2 = None
+
+
+        try:
+            try:
+                pass
+                pass
+                self.match(self.input, EVERY, self.FOLLOW_EVERY_in_interval112)
+                intervalnum = self.input.LT(1)
+                if (DIGIT <= self.input.LA(1) <= DIGITS):
+                    self.input.consume()
+                    self._state.errorRecovery = False
+
+                else:
+                    mse = MismatchedSetException(None, self.input)
+                    raise mse
+
+
+
+                self.interval_mins = int(intervalnum.text)
+
+                self._state.following.append(self.FOLLOW_period_in_interval138)
+                period2 = self.period()
+
+                self._state.following.pop()
+
+                if ((period2 is not None) and [self.input.toString(period2.start,period2.stop)] or [None])[0] == "hours":
+                  self.period_string = "hours"
+                else:
+                  self.period_string = "minutes"
+
+
+
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def ordinals(self, ):
+
+        try:
+            try:
+                pass
+                alt4 = 2
+                LA4_0 = self.input.LA(1)
+
+                if (LA4_0 == EVERY) :
+                    alt4 = 1
+                elif ((FIRST <= LA4_0 <= FOURTH_OR_FIFTH)) :
+                    alt4 = 2
+                else:
+                    nvae = NoViableAltException("", 4, 0, self.input)
+
+                    raise nvae
+
+                if alt4 == 1:
+                    pass
+                    self.match(self.input, EVERY, self.FOLLOW_EVERY_in_ordinals157)
+                    self.ordinal_set = self.ordinal_set.union(allOrdinals)
+
+
+                elif alt4 == 2:
+                    pass
+                    pass
+                    self._state.following.append(self.FOLLOW_ordinal_in_ordinals173)
+                    self.ordinal()
+
+                    self._state.following.pop()
+                    while True:
+                        alt3 = 2
+                        LA3_0 = self.input.LA(1)
+
+                        if (LA3_0 == COMMA) :
+                            alt3 = 1
+
+
+                        if alt3 == 1:
+                            pass
+                            self.match(self.input, COMMA, self.FOLLOW_COMMA_in_ordinals176)
+                            self._state.following.append(self.FOLLOW_ordinal_in_ordinals178)
+                            self.ordinal()
+
+                            self._state.following.pop()
+
+
+                        else:
+                            break
+
+
+
+
+
+
+
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def ordinal(self, ):
+
+        ord = None
+
+        try:
+            try:
+                pass
+                ord = self.input.LT(1)
+                if (FIRST <= self.input.LA(1) <= FOURTH_OR_FIFTH):
+                    self.input.consume()
+                    self._state.errorRecovery = False
+
+                else:
+                    mse = MismatchedSetException(None, self.input)
+                    raise mse
+
+
+
+                self.ordinal_set.add(self.ValueOf(ord.type));
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+    class period_return(ParserRuleReturnScope):
+        def __init__(self):
+            ParserRuleReturnScope.__init__(self)
+
+
+
+
+
+    def period(self, ):
+
+        retval = self.period_return()
+        retval.start = self.input.LT(1)
+
+        try:
+            try:
+                pass
+                if (HOURS <= self.input.LA(1) <= MINUTES):
+                    self.input.consume()
+                    self._state.errorRecovery = False
+
+                else:
+                    mse = MismatchedSetException(None, self.input)
+                    raise mse
+
+
+
+
+
+                retval.stop = self.input.LT(-1)
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return retval
+
+
+
+    def weekdays(self, ):
+
+        try:
+            try:
+                pass
+                pass
+                self._state.following.append(self.FOLLOW_weekday_in_weekdays261)
+                self.weekday()
+
+                self._state.following.pop()
+                while True:
+                    alt5 = 2
+                    LA5_0 = self.input.LA(1)
+
+                    if (LA5_0 == COMMA) :
+                        alt5 = 1
+
+
+                    if alt5 == 1:
+                        pass
+                        self.match(self.input, COMMA, self.FOLLOW_COMMA_in_weekdays264)
+                        self._state.following.append(self.FOLLOW_weekday_in_weekdays266)
+                        self.weekday()
+
+                        self._state.following.pop()
+
+
+                    else:
+                        break
+
+
+
+
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def weekday(self, ):
+
+        dayname = None
+
+        try:
+            try:
+                pass
+                dayname = self.input.LT(1)
+                if (MONDAY <= self.input.LA(1) <= SUNDAY):
+                    self.input.consume()
+                    self._state.errorRecovery = False
+
+                else:
+                    mse = MismatchedSetException(None, self.input)
+                    raise mse
+
+
+
+                self.weekday_set.add(self.ValueOf(dayname.type))
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def monthspec(self, ):
+
+        try:
+            try:
+                pass
+                alt6 = 2
+                LA6_0 = self.input.LA(1)
+
+                if (LA6_0 == MONTH) :
+                    alt6 = 1
+                elif ((JANUARY <= LA6_0 <= DECEMBER)) :
+                    alt6 = 2
+                else:
+                    nvae = NoViableAltException("", 6, 0, self.input)
+
+                    raise nvae
+
+                if alt6 == 1:
+                    pass
+                    self.match(self.input, MONTH, self.FOLLOW_MONTH_in_monthspec344)
+
+                    self.month_set = self.month_set.union(set([
+                        self.ValueOf(JANUARY), self.ValueOf(FEBRUARY), self.ValueOf(MARCH),
+                        self.ValueOf(APRIL), self.ValueOf(MAY), self.ValueOf(JUNE),
+                        self.ValueOf(JULY), self.ValueOf(AUGUST), self.ValueOf(SEPTEMBER),
+                        self.ValueOf(OCTOBER), self.ValueOf(NOVEMBER),
+                        self.ValueOf(DECEMBER)]))
+
+
+
+                elif alt6 == 2:
+                    pass
+                    self._state.following.append(self.FOLLOW_months_in_monthspec354)
+                    self.months()
+
+                    self._state.following.pop()
+
+
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def months(self, ):
+
+        try:
+            try:
+                pass
+                pass
+                self._state.following.append(self.FOLLOW_month_in_months371)
+                self.month()
+
+                self._state.following.pop()
+                while True:
+                    alt7 = 2
+                    LA7_0 = self.input.LA(1)
+
+                    if (LA7_0 == COMMA) :
+                        alt7 = 1
+
+
+                    if alt7 == 1:
+                        pass
+                        self.match(self.input, COMMA, self.FOLLOW_COMMA_in_months374)
+                        self._state.following.append(self.FOLLOW_month_in_months376)
+                        self.month()
+
+                        self._state.following.pop()
+
+
+                    else:
+                        break
+
+
+
+
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def month(self, ):
+
+        monthname = None
+
+        try:
+            try:
+                pass
+                monthname = self.input.LT(1)
+                if (JANUARY <= self.input.LA(1) <= DECEMBER):
+                    self.input.consume()
+                    self._state.errorRecovery = False
+
+                else:
+                    mse = MismatchedSetException(None, self.input)
+                    raise mse
+
+
+                self.month_set.add(self.ValueOf(monthname.type));
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def quarterspec(self, ):
+
+        try:
+            try:
+                pass
+                alt8 = 2
+                LA8_0 = self.input.LA(1)
+
+                if (LA8_0 == QUARTER) :
+                    alt8 = 1
+                elif ((FIRST <= LA8_0 <= THIRD)) :
+                    alt8 = 2
+                else:
+                    nvae = NoViableAltException("", 8, 0, self.input)
+
+                    raise nvae
+
+                if alt8 == 1:
+                    pass
+                    self.match(self.input, QUARTER, self.FOLLOW_QUARTER_in_quarterspec468)
+
+                    self.month_set = self.month_set.union(set([
+                        self.ValueOf(JANUARY), self.ValueOf(APRIL), self.ValueOf(JULY),
+                        self.ValueOf(OCTOBER)]))
+
+
+                elif alt8 == 2:
+                    pass
+                    pass
+                    self._state.following.append(self.FOLLOW_quarter_ordinals_in_quarterspec480)
+                    self.quarter_ordinals()
+
+                    self._state.following.pop()
+                    self.match(self.input, MONTH, self.FOLLOW_MONTH_in_quarterspec482)
+                    self.match(self.input, OF, self.FOLLOW_OF_in_quarterspec484)
+                    self.match(self.input, QUARTER, self.FOLLOW_QUARTER_in_quarterspec486)
+
+
+
+
+
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def quarter_ordinals(self, ):
+
+        try:
+            try:
+                pass
+                pass
+                self._state.following.append(self.FOLLOW_month_of_quarter_ordinal_in_quarter_ordinals505)
+                self.month_of_quarter_ordinal()
+
+                self._state.following.pop()
+                while True:
+                    alt9 = 2
+                    LA9_0 = self.input.LA(1)
+
+                    if (LA9_0 == COMMA) :
+                        alt9 = 1
+
+
+                    if alt9 == 1:
+                        pass
+                        self.match(self.input, COMMA, self.FOLLOW_COMMA_in_quarter_ordinals508)
+                        self._state.following.append(self.FOLLOW_month_of_quarter_ordinal_in_quarter_ordinals510)
+                        self.month_of_quarter_ordinal()
+
+                        self._state.following.pop()
+
+
+                    else:
+                        break
+
+
+
+
+
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+    def month_of_quarter_ordinal(self, ):
+
+        offset = None
+
+        try:
+            try:
+                pass
+                offset = self.input.LT(1)
+                if (FIRST <= self.input.LA(1) <= THIRD):
+                    self.input.consume()
+                    self._state.errorRecovery = False
+
+                else:
+                    mse = MismatchedSetException(None, self.input)
+                    raise mse
+
+
+
+                jOffset = self.ValueOf(offset.type) - 1
+                self.month_set = self.month_set.union(set([
+                    jOffset + self.ValueOf(JANUARY), jOffset + self.ValueOf(APRIL),
+                    jOffset + self.ValueOf(JULY), jOffset + self.ValueOf(OCTOBER)]))
+
+
+
+
+            except RecognitionException, re:
+                self.reportError(re)
+                self.recover(self.input, re)
+        finally:
+
+            pass
+
+        return
+
+
+
+
+
+
+
+    FOLLOW_specifictime_in_timespec44 = frozenset([1])
+    FOLLOW_interval_in_timespec48 = frozenset([1])
+    FOLLOW_ordinals_in_specifictime69 = frozenset([18, 19, 20, 21, 22, 23, 24])
+    FOLLOW_weekdays_in_specifictime71 = frozenset([4])
+    FOLLOW_OF_in_specifictime75 = frozenset([10, 11, 12, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38])
+    FOLLOW_monthspec_in_specifictime78 = frozenset([5])
+    FOLLOW_quarterspec_in_specifictime80 = frozenset([5])
+    FOLLOW_TIME_in_specifictime93 = frozenset([1])
+    FOLLOW_EVERY_in_interval112 = frozenset([7, 8])
+    FOLLOW_set_in_interval122 = frozenset([16, 17])
+    FOLLOW_period_in_interval138 = frozenset([1])
+    FOLLOW_EVERY_in_ordinals157 = frozenset([1])
+    FOLLOW_ordinal_in_ordinals173 = frozenset([1, 9])
+    FOLLOW_COMMA_in_ordinals176 = frozenset([10, 11, 12, 13, 14, 15])
+    FOLLOW_ordinal_in_ordinals178 = frozenset([1, 9])
+    FOLLOW_set_in_ordinal199 = frozenset([1])
+    FOLLOW_set_in_period238 = frozenset([1])
+    FOLLOW_weekday_in_weekdays261 = frozenset([1, 9])
+    FOLLOW_COMMA_in_weekdays264 = frozenset([18, 19, 20, 21, 22, 23, 24])
+    FOLLOW_weekday_in_weekdays266 = frozenset([1, 9])
+    FOLLOW_set_in_weekday285 = frozenset([1])
+    FOLLOW_MONTH_in_monthspec344 = frozenset([1])
+    FOLLOW_months_in_monthspec354 = frozenset([1])
+    FOLLOW_month_in_months371 = frozenset([1, 9])
+    FOLLOW_COMMA_in_months374 = frozenset([25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37])
+    FOLLOW_month_in_months376 = frozenset([1, 9])
+    FOLLOW_set_in_month395 = frozenset([1])
+    FOLLOW_QUARTER_in_quarterspec468 = frozenset([1])
+    FOLLOW_quarter_ordinals_in_quarterspec480 = frozenset([25])
+    FOLLOW_MONTH_in_quarterspec482 = frozenset([4])
+    FOLLOW_OF_in_quarterspec484 = frozenset([38])
+    FOLLOW_QUARTER_in_quarterspec486 = frozenset([1])
+    FOLLOW_month_of_quarter_ordinal_in_quarter_ordinals505 = frozenset([1, 9])
+    FOLLOW_COMMA_in_quarter_ordinals508 = frozenset([10, 11, 12, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38])
+    FOLLOW_month_of_quarter_ordinal_in_quarter_ordinals510 = frozenset([1, 9])
+    FOLLOW_set_in_month_of_quarter_ordinal529 = frozenset([1])
+
+
+
+def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
+    from antlr3.main import ParserMain
+    main = ParserMain("GrocLexer", GrocParser)
+    main.stdin = stdin
+    main.stdout = stdout
+    main.stderr = stderr
+    main.execute(argv)
+
+
+if __name__ == '__main__':
+    main(sys.argv)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/cron/__init__.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"this file is needed to make this a package"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/cron/groc.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""A wrapper around the generated Groc parser and lexer."""
+
+
+import google
+
+import antlr3
+
+import GrocLexer
+import GrocParser
+
+
+class GrocException(Exception):
+  """An error occurred while parsing the groc input string."""
+
+
+class GrocLexerWithErrors(GrocLexer.GrocLexer):
+  """An overridden Lexer that raises exceptions."""
+
+  def emitErrorMessage(self, msg):
+    """Raise an exception if the input fails to parse correctly.
+
+    Overriding the default, which normally just prints a message to
+    stderr.
+
+    Arguments:
+      msg: the error message
+    Raises:
+      GrocException: always.
+    """
+    raise GrocException(msg)
+
+
+class GrocParserWithErrors(GrocParser.GrocParser):
+  """An overridden Parser that raises exceptions."""
+
+  def emitErrorMessage(self, msg):
+    """Raise an exception if the input fails to parse correctly.
+
+    Overriding the default, which normally just prints a message to
+    stderr.
+
+    Arguments:
+      msg: the error message
+    Raises:
+      GrocException: always.
+    """
+    raise GrocException(msg)
+
+
+def CreateParser(parse_string):
+  """Creates a Groc Parser."""
+  input_string = antlr3.ANTLRStringStream(parse_string)
+  lexer = GrocLexerWithErrors(input_string)
+  tokens = antlr3.CommonTokenStream(lexer)
+  parser = GrocParserWithErrors(tokens)
+  return parser
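+
+
+# A minimal usage sketch (informal; assumes the antlr3 runtime is
+# importable, as above):
+#
+#   parser = CreateParser('every 20 mins')
+#   parser.timespec()
+#   parser.interval_mins     # -> 20
+#   parser.period_string     # -> 'minutes'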
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/cron/groctimespecification.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""Implementation of scheduling for Groc format schedules.
+
+A Groc schedule looks like '1st,2nd monday 9:00', or 'every 20 mins'. This
+module takes a parsed schedule (produced by Antlr) and creates objects that
+can produce times that match this schedule.
+
+A parsed schedule is one of two types: an Interval or a Specific Time.
+See the class docstrings for more.
+
+Extensions to be considered:
+
+  allowing a comma-separated list of times to run
+  allowing the user to specify particular days of the month to run
+
+"""
+
+
+import calendar
+import datetime
+
+import groc
+
+HOURS = 'hours'
+MINUTES = 'minutes'
+
+
+def GrocTimeSpecification(schedule):
+  """Factory function.
+
+  Turns a schedule specification into a TimeSpecification.
+
+  Arguments:
+    schedule: the schedule specification, as a string
+
+  Returns:
+    a TimeSpecification instance
+  """
+
+  parser = groc.CreateParser(schedule)
+  parser.timespec()
+
+  if parser.interval_mins:
+    return IntervalTimeSpecification(parser.interval_mins, parser.period_string)
+  else:
+    return SpecificTimeSpecification(parser.ordinal_set, parser.weekday_set,
+                                     parser.month_set, None, parser.time_string)
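+
+# Sketch of the factory in use (informal):
+#
+#   GrocTimeSpecification('every 20 mins')
+#   # -> an IntervalTimeSpecification with interval=20, period='minutes'
+#   GrocTimeSpecification('1st monday of month 09:00')
+#   # -> a SpecificTimeSpecification matching 09:00 on first Mondays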
+
+
+class TimeSpecification(object):
+  """Base class for time specifications."""
+
+  def GetMatches(self, start, n):
+    """Returns the next n times that match the schedule, starting at time start.
+
+    Arguments:
+      start: a datetime to start from. Matches will start from after this time
+      n:     the number of matching times to return
+
+    Returns:
+      a list of n datetime objects
+    """
+    out = []
+    for _ in range(n):
+      start = self.GetMatch(start)
+      out.append(start)
+    return out
+
+  def GetMatch(self, start):
+    """Returns the next match after time start.
+
+    Must be implemented in subclasses.
+
+    Arguments:
+      start: a datetime to start with. Matches will start from this time
+
+    Returns:
+      a datetime object
+    """
+    raise NotImplementedError
+
+
+class IntervalTimeSpecification(TimeSpecification):
+  """A time specification for a given interval.
+
+  An interval-type spec runs at a given fixed interval. It has two
+  attributes:
+  period   - the type of interval, either "hours" or "minutes"
+  interval - the number of units of type period.
+  """
+
+  def __init__(self, interval, period):
+    super(IntervalTimeSpecification, self).__init__(self)
+    self.interval = interval
+    self.period = period
+
+  def GetMatch(self, t):
+    """Returns the next match after time 't'.
+
+    Arguments:
+      t: a datetime to start from. Matches will start from after this time
+
+    Returns:
+      a datetime object
+    """
+    if self.period == HOURS:
+      return t + datetime.timedelta(hours=self.interval)
+    else:
+      return t + datetime.timedelta(minutes=self.interval)
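+
+  # Example (informal): with interval=20 and period='minutes', a start of
+  # 13:00 maps to 13:20 the same day; matches are relative to the supplied
+  # start time, not aligned to the wall clock.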
+
+
+class SpecificTimeSpecification(TimeSpecification):
+  """Specific time specification.
+
+  A specific-time spec is more complex: it defines a certain time of day to
+  run, on given days. It has the following attributes:
+  time     - the time of day to run, as "HH:MM"
+  ordinals - first, second, third &c, as a set of integers in 1..5
+  months   - the months in which this is valid, as a set of integers in 1..12
+  weekdays - the days of the week to run this, 0=Sunday, 6=Saturday.
+
+  The specific time interval can be quite complex. A schedule could look like
+  this:
+  "1st,third sat,sun of jan,feb,mar 09:15"
+
+  In this case, ordinals would be [1,3], weekdays [0,6], months [1,2,3] and time
+  would be "09:15".
+  """
+
+  def __init__(self, ordinals=None, weekdays=None, months=None, monthdays=None,
+               timestr='00:00'):
+    super(SpecificTimeSpecification, self).__init__(self)
+    if weekdays and monthdays:
+      raise ValueError("can't supply both monthdays and weekdays")
+    if ordinals is None:
+      self.ordinals = set(range(1, 6))
+    else:
+      self.ordinals = ordinals
+
+    if weekdays is None:
+      self.weekdays = set(range(7))
+    else:
+      self.weekdays = weekdays
+
+    if months is None:
+      self.months = set(range(1, 13))
+    else:
+      self.months = months
+
+    if monthdays is None:
+      self.monthdays = set()
+    else:
+      self.monthdays = monthdays
+    hourstr, minutestr = timestr.split(':')
+    self.time = datetime.time(int(hourstr), int(minutestr))
+
+  def _MatchingDays(self, year, month):
+    """Returns matching days for the given year and month.
+
+    For the given year and month, return the days that match this instance's
+    day specification, based on the ordinals and weekdays.
+
+    Arguments:
+      year: the year as an integer
+      month: the month as an integer, in range 1-12
+
+    Returns:
+      a list of matching days, as ints in range 1-31
+    """
+    out_days = []
+    start_day, last_day = calendar.monthrange(year, month)
+    start_day = (start_day + 1) % 7
+    for ordinal in self.ordinals:
+      for weekday in self.weekdays:
+        day = ((weekday - start_day) % 7) + 1
+        day += 7 * (ordinal - 1)
+        if day <= last_day:
+          out_days.append(day)
+    return sorted(out_days)
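+
+  # Worked example (informal): for January 2009, calendar.monthrange(2009, 1)
+  # returns (3, 31), Thursday in the Monday=0 convention, so start_day
+  # becomes (3 + 1) % 7 = 4 under Sunday=0. With ordinal=1 and weekday=0
+  # (Sunday), day = ((0 - 4) % 7) + 1 = 4, i.e. the first Sunday is Jan 4th.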
+
+  def _NextMonthGenerator(self, start, matches):
+    """Creates a generator that produces results from the set 'matches'.
+
+    Matches must be >= 'start'. If none match, the wrap counter is incremented,
+    and the result set is reset to the full set. Yields a 2-tuple of (match,
+    wrapcount).
+
+    Arguments:
+      start: first set of matches will be >= this value (an int)
+      matches: the set of potential matches (a sequence of ints)
+
+    Yields:
+      a two-tuple of (match, wrapcount). match is an int in the range 1-12;
+      wrapcount is an int indicating how many times we've wrapped around.
+    """
+    potential = matches = sorted(matches)
+    after = start - 1
+    wrapcount = 0
+    while True:
+      potential = [x for x in potential if x > after]
+      if not potential:
+        wrapcount += 1
+        potential = matches
+      after = potential[0]
+      yield (after, wrapcount)
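+
+  # Example (informal): with start=11 and matches=[1, 4, 7, 10] (quarter
+  # start months), no match is >= 11, so the generator wraps immediately and
+  # yields (1, 1), then (4, 1), (7, 1), (10, 1), (1, 2), and so on.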
+
+  def GetMatch(self, start):
+    """Returns the next time that matches the schedule after time start.
+
+    Arguments:
+      start: a datetime to start with. Matches will start after this time
+
+    Returns:
+      a datetime object
+    """
+    start_time = start
+    if self.months:
+      months = self._NextMonthGenerator(start.month, self.months)
+    while True:
+      month, yearwraps = months.next()
+      candidate = start_time.replace(day=1, month=month,
+                                     year=start_time.year + yearwraps)
+
+      if self.monthdays:
+        _, last_day = calendar.monthrange(candidate.year, candidate.month)
+        day_matches = sorted([x for x in self.monthdays if x <= last_day])
+      else:
+        day_matches = self._MatchingDays(candidate.year, month)
+
+      if ((candidate.year, candidate.month)
+          == (start_time.year, start_time.month)):
+        day_matches = [x for x in day_matches if x >= start_time.day]
+        if day_matches and day_matches[0] == start_time.day:
+          if start_time.time() >= self.time:
+            day_matches.pop(0)
+      if not day_matches:
+        continue
+      out = candidate.replace(day=day_matches[0], hour=self.time.hour,
+                              minute=self.time.minute, second=0, microsecond=0)
+      return out
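+
+
+# An end-to-end sketch (informal):
+#
+#   spec = SpecificTimeSpecification(ordinals=set([1]), weekdays=set([0]),
+#                                    timestr='09:15')
+#   spec.GetMatch(datetime.datetime(2009, 1, 20))
+#   # -> datetime.datetime(2009, 2, 1, 9, 15): the first Sunday of January
+#   # falls before the 20th, so the match rolls over to February.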
--- a/thirdparty/google_appengine/google/appengine/datastore/datastore_index.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/datastore/datastore_index.py	Tue Jan 20 13:19:45 2009 +0000
@@ -332,6 +332,11 @@
       (not props or (len(props) == 1 and props[0][1] == ASCENDING))):
     required = False
 
+    if props:
+      prop, dir = props[0]
+      if prop in datastore_types._SPECIAL_PROPERTIES and dir is DESCENDING:
+        required = True
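+    # A descending sort on a special property (such as __key__) still
+    # requires a composite index, even for an otherwise simple query.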
+
   unique_names = set(name for name, dir in props)
   if len(props) > 1 and len(unique_names) == 1:
     required = False
--- a/thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -60,12 +60,6 @@
     if self.has_handle_ and self.handle_ != x.handle_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_handle_):
@@ -188,12 +182,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_op_):
@@ -312,12 +300,6 @@
     if self.has_direction_ and self.direction_ != x.direction_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_property_):
@@ -595,12 +577,6 @@
     if self.has_require_perfect_plan_ and self.require_perfect_plan_ != x.require_perfect_plan_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_app_):
@@ -941,12 +917,6 @@
     if self.has_native_limit_ and self.native_limit_ != x.native_limit_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.native_index_:
@@ -1077,12 +1047,6 @@
     if self.has_cursor_ and self.cursor_ != x.cursor_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_cursor_):
@@ -1164,12 +1128,6 @@
     if x is self: return 1
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     return initialized
@@ -1206,6 +1164,83 @@
 
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
+class Cost(ProtocolBuffer.ProtocolMessage):
+  has_index_writes_ = 0
+  index_writes_ = 0
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def index_writes(self): return self.index_writes_
+
+  def set_index_writes(self, x):
+    self.has_index_writes_ = 1
+    self.index_writes_ = x
+
+  def clear_index_writes(self):
+    self.has_index_writes_ = 0
+    self.index_writes_ = 0
+
+  def has_index_writes(self): return self.has_index_writes_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_index_writes()): self.set_index_writes(x.index_writes())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_index_writes_ != x.has_index_writes_: return 0
+    if self.has_index_writes_ and self.index_writes_ != x.index_writes_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    if (self.has_index_writes_): n += 1 + self.lengthVarInt64(self.index_writes_)
+    return n + 0
+
+  def Clear(self):
+    self.clear_index_writes()
+
+  def OutputUnchecked(self, out):
+    if (self.has_index_writes_):
+      out.putVarInt32(8)
+      out.putVarInt32(self.index_writes_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 8:
+        self.set_index_writes(d.getVarInt32())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_index_writes_: res+=prefix+("index_writes: %s\n" % self.DebugFormatInt32(self.index_writes_))
+    return res
+
+  kindex_writes = 1
+
+  _TEXT = (
+   "ErrorCode",
+   "index_writes",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.NUMERIC,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
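+
+# Wire-format note (informal): putVarInt32(8) in Cost.OutputUnchecked writes
+# the tag byte (field_number << 3) | wire_type, i.e. (1 << 3) | 0 for the
+# varint field index_writes. PutResponse and DeleteResponse carry Cost as a
+# length-delimited submessage, hence their tag bytes 18 ((2 << 3) | 2) and
+# 10 ((1 << 3) | 2).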
 class GetRequest(ProtocolBuffer.ProtocolMessage):
   has_transaction_ = 0
   transaction_ = None
@@ -1263,12 +1298,6 @@
     if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.key_:
@@ -1387,12 +1416,6 @@
     if self.has_entity_ and self.entity_ != x.entity_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (self.has_entity_ and not self.entity_.IsInitialized(debug_strs)): initialized = 0
@@ -1468,12 +1491,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.entity_:
@@ -1614,12 +1631,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.entity_:
@@ -1730,9 +1741,12 @@
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
 class PutResponse(ProtocolBuffer.ProtocolMessage):
+  has_cost_ = 0
+  cost_ = None
 
   def __init__(self, contents=None):
     self.key_ = []
+    self.lazy_init_lock_ = thread.allocate_lock()
     if contents is not None: self.MergeFromString(contents)
 
   def key_size(self): return len(self.key_)
@@ -1751,44 +1765,65 @@
 
   def clear_key(self):
     self.key_ = []
+  def cost(self):
+    if self.cost_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.cost_ is None: self.cost_ = Cost()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.cost_
+
+  def mutable_cost(self): self.has_cost_ = 1; return self.cost()
+
+  def clear_cost(self):
+    self.has_cost_ = 0;
+    if self.cost_ is not None: self.cost_.Clear()
+
+  def has_cost(self): return self.has_cost_
+
 
   def MergeFrom(self, x):
     assert x is not self
     for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
+    if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
 
   def Equals(self, x):
     if x is self: return 1
     if len(self.key_) != len(x.key_): return 0
     for e1, e2 in zip(self.key_, x.key_):
       if e1 != e2: return 0
+    if self.has_cost_ != x.has_cost_: return 0
+    if self.has_cost_ and self.cost_ != x.cost_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.key_:
       if not p.IsInitialized(debug_strs): initialized=0
+    if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
     return initialized
 
   def ByteSize(self):
     n = 0
     n += 1 * len(self.key_)
     for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
+    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
     return n + 0
 
   def Clear(self):
     self.clear_key()
+    self.clear_cost()
 
   def OutputUnchecked(self, out):
     for i in xrange(len(self.key_)):
       out.putVarInt32(10)
       out.putVarInt32(self.key_[i].ByteSize())
       self.key_[i].OutputUnchecked(out)
+    if (self.has_cost_):
+      out.putVarInt32(18)
+      out.putVarInt32(self.cost_.ByteSize())
+      self.cost_.OutputUnchecked(out)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -1799,6 +1834,12 @@
         d.skip(length)
         self.add_key().TryMerge(tmp)
         continue
+      if tt == 18:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_cost().TryMerge(tmp)
+        continue
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
       d.skipData(tt)
 
@@ -1813,19 +1854,27 @@
       res+=e.__str__(prefix + "  ", printElemNumber)
       res+=prefix+">\n"
       cnt+=1
+    if self.has_cost_:
+      res+=prefix+"cost <\n"
+      res+=self.cost_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
     return res
 
   kkey = 1
+  kcost = 2
 
   _TEXT = (
    "ErrorCode",
    "key",
+   "cost",
   )
 
   _TYPES = (
    ProtocolBuffer.Encoder.NUMERIC,
    ProtocolBuffer.Encoder.STRING,
 
+   ProtocolBuffer.Encoder.STRING,
+
   )
 
   _STYLE = """"""
@@ -1887,12 +1936,6 @@
     if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.key_:
@@ -1987,6 +2030,97 @@
 
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
+class DeleteResponse(ProtocolBuffer.ProtocolMessage):
+  has_cost_ = 0
+  cost_ = None
+
+  def __init__(self, contents=None):
+    self.lazy_init_lock_ = thread.allocate_lock()
+    if contents is not None: self.MergeFromString(contents)
+
+  def cost(self):
+    if self.cost_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.cost_ is None: self.cost_ = Cost()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.cost_
+
+  def mutable_cost(self): self.has_cost_ = 1; return self.cost()
+
+  def clear_cost(self):
+    self.has_cost_ = 0;
+    if self.cost_ is not None: self.cost_.Clear()
+
+  def has_cost(self): return self.has_cost_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_cost_ != x.has_cost_: return 0
+    if self.has_cost_ and self.cost_ != x.cost_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
+    return n + 0
+
+  def Clear(self):
+    self.clear_cost()
+
+  def OutputUnchecked(self, out):
+    if (self.has_cost_):
+      out.putVarInt32(10)
+      out.putVarInt32(self.cost_.ByteSize())
+      self.cost_.OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_cost().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_cost_:
+      res+=prefix+"cost <\n"
+      res+=self.cost_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+  kcost = 1
+
+  _TEXT = (
+   "ErrorCode",
+   "cost",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
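PutResponse and DeleteResponse now both carry an optional Cost submessage (tag 18 above; tag 10 here, since cost is DeleteResponse's first field). A minimal round-trip sketch using only the accessors shown, runnable inside an SDK environment:

    from google.appengine.datastore import datastore_pb

    resp = datastore_pb.DeleteResponse()
    resp.mutable_cost()           # lazily creates the Cost and sets has_cost_
    decoded = datastore_pb.DeleteResponse(resp.Encode())
    assert decoded.has_cost()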
 class NextRequest(ProtocolBuffer.ProtocolMessage):
   has_cursor_ = 0
   has_count_ = 0
@@ -2030,12 +2164,6 @@
     if self.has_count_ and self.count_ != x.count_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_cursor_):
@@ -2181,12 +2309,6 @@
     if self.has_more_results_ and self.more_results_ != x.more_results_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (self.has_cursor_ and not self.cursor_.IsInitialized(debug_strs)): initialized = 0
@@ -2318,12 +2440,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.kind_:
@@ -2419,12 +2535,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.index_:
@@ -2487,4 +2597,4 @@
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
 
-__all__ = ['Transaction','Query','Query_Filter','Query_Order','QueryExplanation','Cursor','Error','GetRequest','GetResponse','GetResponse_Entity','PutRequest','PutResponse','DeleteRequest','NextRequest','QueryResult','Schema','CompositeIndices']
+__all__ = ['Transaction','Query','Query_Filter','Query_Order','QueryExplanation','Cursor','Error','Cost','GetRequest','GetResponse','GetResponse_Entity','PutRequest','PutResponse','DeleteRequest','DeleteResponse','NextRequest','QueryResult','Schema','CompositeIndices']
--- a/thirdparty/google_appengine/google/appengine/datastore/entity_pb.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/datastore/entity_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -86,12 +86,6 @@
     if self.has_name_ and self.name_ != x.name_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_type_):
@@ -193,12 +187,6 @@
     if self.has_y_ and self.y_ != x.y_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_x_):
@@ -326,12 +314,6 @@
     if self.has_gaiaid_ and self.gaiaid_ != x.gaiaid_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_email_):
@@ -452,12 +434,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_app_):
@@ -662,12 +638,6 @@
     if self.has_referencevalue_ and self.referencevalue_ != x.referencevalue_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (self.has_pointvalue_ and not self.pointvalue_.IsInitialized(debug_strs)): initialized = 0
@@ -856,6 +826,7 @@
 
   BLOB         =   14
   TEXT         =   15
+  BYTESTRING   =   16
   ATOM_CATEGORY =    1
   ATOM_LINK    =    2
   ATOM_TITLE   =    3
@@ -873,6 +844,7 @@
   _Meaning_NAMES = {
     14: "BLOB",
     15: "TEXT",
+    16: "BYTESTRING",
     1: "ATOM_CATEGORY",
     2: "ATOM_LINK",
     3: "ATOM_TITLE",
@@ -984,12 +956,6 @@
     if self.has_multiple_ and self.multiple_ != x.multiple_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_name_):
@@ -1167,12 +1133,6 @@
     if self.has_name_ and self.name_ != x.name_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_type_):
@@ -1261,12 +1221,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     for p in self.element_:
@@ -1380,12 +1334,6 @@
     if self.has_path_ and self.path_ != x.path_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_app_):
@@ -1577,12 +1525,6 @@
     if self.has_gaiaid_ and self.gaiaid_ != x.gaiaid_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_email_):
@@ -1830,12 +1772,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_key_):
@@ -2099,12 +2035,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_index_id_):
@@ -2233,12 +2163,6 @@
     if self.has_direction_ and self.direction_ != x.direction_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_name_):
@@ -2352,12 +2276,6 @@
       if e1 != e2: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_entity_type_):
@@ -2548,12 +2466,6 @@
     if self.has_state_ and self.state_ != x.state_: return 0
     return 1
 
-  def __eq__(self, other):
-    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
-
-  def __ne__(self, other):
-    return not (self == other)
-
   def IsInitialized(self, debug_strs=None):
     initialized = 1
     if (not self.has_app_id_):
--- a/thirdparty/google_appengine/google/appengine/ext/admin/__init__.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/__init__.py	Tue Jan 20 13:19:45 2009 +0000
@@ -777,7 +777,8 @@
       multiline = len(value) > 255 or value.find('\n') >= 0
     if not multiline:
       for sample_value in sample_values:
-        if len(sample_value) > 255 or sample_value.find('\n') >= 0:
+        if sample_value and (len(sample_value) > 255 or
+                             sample_value.find('\n') >= 0):
           multiline = True
           break
     if multiline:
--- a/thirdparty/google_appengine/google/appengine/ext/admin/templates/datastore.html	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/templates/datastore.html	Tue Jan 20 13:19:45 2009 +0000
@@ -123,13 +123,13 @@
           <th>ID</th>
           <th>Key Name</th>
           {% for header in headers %}
-            <th style="cursor: pointer" onclick="document.location.href='{{ order_base_url }}&amp;order={% ifequal order header.name %}-{% endifequal %}{{ header.name|urlencode }}&amp;order_type={{ header.type|urlencode }}'"><a href="{{ order_base_url }}&amp;order={% ifequal order header.name %}-{% endifequal %}{{ header.name|urlencode }}&amp;order_type={{ header.type|urlencode }}">{{ header.name }}</a></th>
+            <th style="cursor: pointer" onclick="document.location.href='{{ order_base_url }}&amp;order={% ifequal order header.name %}-{% endifequal %}{{ header.name|urlencode }}&amp;order_type={{ header.type|urlencode }}'"><a href="{{ order_base_url }}&amp;order={% ifequal order header.name %}-{% endifequal %}{{ header.name|urlencode }}&amp;order_type={{ header.type|urlencode }}" onclick="return false">{{ header.name }}</a></th>
           {% endfor %}
         </tr>
         {% for entity in entities %}
           <tr class="{% if forloop.counter|divisibleby:2 %}even{% else %}odd{% endif %}">
             <td><input id="key{{ forloop.counter }}" type="checkbox" name="key{{ forloop.counter }}" value="{{ entity.key|escape }}" onclick="updateDeleteButtonAndCheckbox();"/></td>
-            <td onclick="location.href='{{ entity.edit_uri|escape }}'"><a href="{{ entity.edit_uri|escape }}" title="Edit entity #{{ entity.key|escape }}">{{ entity.shortened_key|escape }}</a></td>
+            <td onclick="location.href='{{ entity.edit_uri|escape }}'"><a href="{{ entity.edit_uri|escape }}" title="Edit entity #{{ entity.key|escape }}" onclick="return false">{{ entity.shortened_key|escape }}</a></td>
             <td>
               {% if entity.key_id %}
                 {{entity.key_id}}
--- a/thirdparty/google_appengine/google/appengine/ext/bulkload/__init__.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/bulkload/__init__.py	Tue Jan 20 13:19:45 2009 +0000
@@ -401,6 +401,7 @@
         response code: integer HTTP response code to return
         output: string containing the HTTP response body
     """
+    data = data.encode('utf-8')
     Validate(kind, basestring)
     Validate(data, basestring)
     output = []
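The added encode call normalizes unicode payloads to UTF-8 byte strings before validation, so the basestring check that follows still passes. The effect, in a Python 2 sketch:

    data = u'caf\xe9'
    data = data.encode('utf-8')
    assert isinstance(data, basestring)   # still satisfies Validate
    assert data == 'caf\xc3\xa9'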
--- a/thirdparty/google_appengine/google/appengine/ext/db/__init__.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/db/__init__.py	Tue Jan 20 13:19:45 2009 +0000
@@ -117,7 +117,7 @@
 Rating = datastore_types.Rating
 Text = datastore_types.Text
 Blob = datastore_types.Blob
-
+ByteString = datastore_types.ByteString
 
 _kind_map = {}
 
@@ -171,6 +171,7 @@
     datetime.date,
     datetime.time,
     Blob,
+    ByteString,
     Text,
     users.User,
     Category,
@@ -1554,8 +1555,8 @@
   def order(self, property):
     """Set order of query result.
 
-    To use descending order, prepend '-' (minus) to the property name, e.g.,
-    '-date' rather than 'date'.
+    To use descending order, prepend '-' (minus) to the property
+    name, e.g., '-date' rather than 'date'.
 
     Args:
       property: Property to sort on.
@@ -1573,7 +1574,8 @@
       order = datastore.Query.ASCENDING
 
     if not issubclass(self._model_class, Expando):
-      if property not in self._model_class.properties():
+      if (property not in self._model_class.properties() and
+          property not in datastore_types._SPECIAL_PROPERTIES):
         raise PropertyError('Invalid property name \'%s\'' % property)
 
     self.__orderings.append((property, order))
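The extra membership test means order() now also accepts the datastore's special properties on non-Expando models. That '__key__' is among datastore_types._SPECIAL_PROPERTIES is an assumption here, but it matches the __key__ sort support this release's remote_api documentation leans on:

    from google.appengine.ext import db

    class House(db.Model):   # illustrative model, not from the SDK
      doors = db.IntegerProperty()

    query = House.all().order('__key__')   # previously raised PropertyError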
@@ -1829,6 +1831,37 @@
   data_type = Blob
 
 
+class ByteStringProperty(Property):
+  """A short (<=500 bytes) byte string.
+
+  This type should be used for short binary values that need to be indexed. If
+  you do not require indexing (regardless of length), use BlobProperty instead.
+  """
+
+  def validate(self, value):
+    """Validate ByteString property.
+
+    Returns:
+      A valid value.
+
+    Raises:
+      BadValueError if property is not instance of 'ByteString'.
+    """
+    if value is not None and not isinstance(value, ByteString):
+      try:
+        value = ByteString(value)
+      except TypeError, err:
+        raise BadValueError('Property %s must be convertible '
+                            'to a ByteString instance (%s)' % (self.name, err))
+    value = super(ByteStringProperty, self).validate(value)
+    if value is not None and not isinstance(value, ByteString):
+      raise BadValueError('Property %s must be a ByteString instance'
+                          % self.name)
+    return value
+
+  data_type = ByteString
+
+
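A short usage sketch for the new property; the model and value are illustrative:

    from google.appengine.ext import db

    class Fingerprint(db.Model):
      # Indexable, <=500-byte binary value; BlobProperty remains the right
      # choice when no index is needed.
      digest = db.ByteStringProperty(required=True)

    fp = Fingerprint(digest=db.ByteString('\x00\x01\x02'))
    fp.put()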
 class DateTimeProperty(Property):
   """The base class of all of our date/time properties.
 
@@ -2149,24 +2182,31 @@
   """A user property."""
 
   def __init__(self, verbose_name=None, name=None,
-               required=False, validator=None, choices=None):
+               required=False, validator=None, choices=None,
+               auto_current_user=False, auto_current_user_add=False):
     """Initializes this Property with the given options.
 
-    Do not assign user properties a default value.
+    Note: this does *not* support the 'default' keyword argument.
+    Use auto_current_user_add=True instead.
 
     Args:
       verbose_name: User friendly name of property.
       name: Storage name for property.  By default, uses attribute name
         as it is assigned in the Model sub-class.
-      default: Default value for property if none is assigned.
       required: Whether property is required.
       validator: User provided method used for validation.
       choices: User provided set of valid property values.
+      auto_current_user: If true, the value is set to the current user
+        each time the entity is written to the datastore.
+      auto_current_user_add: If true, the value is set to the current user
+        the first time the entity is written to the datastore.
     """
     super(UserProperty, self).__init__(verbose_name, name,
                                        required=required,
                                        validator=validator,
                                        choices=choices)
+    self.auto_current_user = auto_current_user
+    self.auto_current_user_add = auto_current_user_add
 
   def validate(self, value):
     """Validate user.
@@ -2182,6 +2222,30 @@
       raise BadValueError('Property %s must be a User' % self.name)
     return value
 
+  def default_value(self):
+    """Default value for user.
+
+    Returns:
+      Value of users.get_current_user() if auto_current_user or
+      auto_current_user_add is set; else None. (But *not* the default
+      implementation, since we don't support the 'default' keyword
+      argument.)
+    """
+    if self.auto_current_user or self.auto_current_user_add:
+      return users.get_current_user()
+    return None
+
+  def get_value_for_datastore(self, model_instance):
+    """Get value from property to send to datastore.
+
+    Returns:
+      Value of users.get_current_user() if auto_current_user is set;
+      else the default implementation.
+    """
+    if self.auto_current_user:
+      return users.get_current_user()
+    return super(UserProperty, self).get_value_for_datastore(model_instance)
+
   data_type = users.User
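A sketch of the two new flags (model illustrative); together they mirror the auto_now/auto_now_add pattern of the date/time properties:

    from google.appengine.ext import db

    class Revision(db.Model):
      created_by = db.UserProperty(auto_current_user_add=True)  # first put only
      edited_by = db.UserProperty(auto_current_user=True)       # every put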
 
 
--- a/thirdparty/google_appengine/google/appengine/ext/db/djangoforms.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/db/djangoforms.py	Tue Jan 20 13:19:45 2009 +0000
@@ -22,6 +22,10 @@
 db package instead, and create Django forms from it, either fully
 automatically, or with overrides.
 
+Note, you should not import these classes from this module.  Importing
+this module patches the classes in place, and you should continue to
+import them from google.appengine.ext.db.
+
 Some of the code here is strongly inspired by Django's own ModelForm
 class (new in Django 0.97).  Our code also supports Django 0.96 (so as
 to be maximally compatible).  Note that our API is always similar to
@@ -76,6 +80,7 @@
 
 
 import itertools
+import logging
 
 
 import django.core.exceptions
@@ -208,6 +213,15 @@
     return value
 
 
+class UserProperty(db.Property):
+  """This class exists solely to log a warning when it is used."""
+
+  def __init__(self, *args, **kwds):
+    logging.warn("Please don't use modelforms.UserProperty; "
+                 "use db.UserProperty instead.")
+    super(UserProperty, self).__init__(*args, **kwds)
+
+
 class StringProperty(db.StringProperty):
   __metaclass__ = monkey_patch
 
@@ -376,20 +390,6 @@
     return bool(value)
 
 
-class UserProperty(db.UserProperty):
-  __metaclass__ = monkey_patch
-
-  def get_form_field(self, **kwargs):
-    """Return a Django form field appropriate for a user property.
-
-    This defaults to a CharField whose initial value is the current
-    username.
-    """
-    defaults = {'initial': users.GetCurrentUser()}
-    defaults.update(kwargs)
-    return super(UserProperty, self).get_form_field(**defaults)
-
-
 class StringListProperty(db.StringListProperty):
   __metaclass__ = monkey_patch
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/db/polymodel.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,350 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Support for polymorphic models and queries.
+
+The Model class on its own is only able to support functional polymorphism.
+It is possible to create a subclass of Model and then subclass that one as
+many generations as necessary and those classes will share all the same
+properties and behaviors.  The problem is that subclassing Model in this way
+places each subclass in its own Kind.  This means that it is not possible
+to do polymorphic queries.  Building a query on a base class will only return
+instances of that class from the Datastore, while queries on a subclass will
+only return those instances.
+
+This module allows applications to specify class hierarchies that support
+polymorphic queries.
+"""
+
+
+from google.appengine.ext import db
+
+_class_map = {}
+
+_CLASS_KEY_PROPERTY = 'class'
+
+
+class _ClassKeyProperty(db.ListProperty):
+  """Property representing class-key property of a polymorphic class.
+
+  The class key is a list of strings describing a polymorphic instance's
+  place within its class hierarchy.  This property is automatically calculated.
+  For example:
+
+    class Foo(PolyModel): ...
+    class Bar(Foo): ...
+    class Baz(Bar): ...
+
+    Foo.class_key() == ['Foo']
+    Bar.class_key() == ['Foo', 'Bar']
+    Baz.class_key() == ['Foo', 'Bar', 'Baz']
+  """
+
+  def __init__(self, name):
+    super(_ClassKeyProperty, self).__init__(name=name,
+                                            item_type=str,
+                                            default=None)
+
+  def __set__(self, *args):
+    raise db.DerivedPropertyError(
+        'Class-key is a derived property and cannot be set.')
+
+  def __get__(self, model_instance, model_class):
+    if model_instance is None:
+      return self
+    return [cls.__name__ for cls in model_class.__class_hierarchy__]
+
+
+class PolymorphicClass(db.PropertiedClass):
+  """Meta-class for initializing PolymorphicClasses.
+
+  This class extends PropertiedClass to add a few static attributes to
+  new polymorphic classes necessary for their correct functioning.
+
+  """
+
+  def __init__(cls, name, bases, dct):
+    """Initializes a class that belongs to a polymorphic hierarchy.
+
+    This method configures a few built-in attributes of polymorphic
+    models:
+
+      __root_class__: If the new class is a root class, __root_class__ is set to
+        itself so that its subclasses can quickly know what the root of
+        their hierarchy is and what kind they are stored in.
+      __class_hierarchy__: List of classes describing the new model's place
+        in the class hierarchy.  The first element is always the root
+        element while the last element is the new class itself.  For example:
+
+          class Foo(PolyModel): ...
+
+          class Bar(Foo): ...
+
+          class Baz(Bar): ...
+
+          Foo.__class_hierarchy__ == [Foo]
+          Bar.__class_hierarchy__ == [Foo, Bar]
+          Baz.__class_hierarchy__ == [Foo, Bar, Baz]
+
+    Unless the class is a root class or PolyModel itself, it is not
+    inserted into the kind-map like other models.  However, all polymorphic
+    classes are inserted into the class-map, which maps each class-key to its
+    implementation class.  This class-map is consulted via the polymorphic
+    instance's discriminator (the 'class' property of the entity) when
+    loading from the datastore.
+    """
+    if name == 'PolyModel' or PolyModel not in bases:
+      db._initialize_properties(cls, name, bases, dct)
+      super(db.PropertiedClass, cls).__init__(name, bases, dct)
+    else:
+      cls.__root_class__ = cls
+      super(PolymorphicClass, cls).__init__(name, bases, dct)
+
+    if name == 'PolyModel':
+      return
+
+    if cls is not cls.__root_class__:
+      poly_class = None
+      for base in cls.__bases__:
+        if issubclass(base, PolyModel):
+          poly_class = base
+          break
+      else:
+        raise db.ConfigurationError(
+            "Polymorphic class '%s' does not inherit from PolyModel."
+            % cls.__name__)
+
+      cls.__class_hierarchy__ = poly_class.__class_hierarchy__ + [cls]
+    else:
+      cls.__class_hierarchy__ = [cls]
+
+    _class_map[cls.class_key()] = cls
+
+
+class PolyModel(db.Model):
+  """Base-class for models that supports polymorphic queries.
+
+  Use this class to build hierarchies that can be queried based
+  on their types.
+
+  Example:
+
+    Consider the following model hierarchy:
+
+      +------+
+      |Animal|
+      +------+
+        |
+        +-----------------+
+        |                 |
+      +------+          +------+
+      |Canine|          |Feline|
+      +------+          +------+
+        |                 |
+        +-------+         +-------+
+        |       |         |       |
+      +---+   +----+    +---+   +-------+
+      |Dog|   |Wolf|    |Cat|   |Panther|
+      +---+   +----+    +---+   +-------+
+
+    This class hierarchy has three levels.  The first is the "root class".
+    All models in a single class hierarchy must inherit from this root.  All
+    models in the hierarchy are stored as the same kind as the root class.
+    For example, Panther entities when stored to the datastore are of the kind
+    'Animal'.  Querying against the Animal kind will retrieve Cats, Dogs and
+    Canines, for example, that match your query.  Different classes stored
+    in the root class's kind are identified by their class-key.  When an entity
+    is loaded from the datastore, its class-key selects the implementation class.
+
+  Polymorphic properties:
+
+    Properties that are defined in a given base-class within a hierarchy are
+    stored in the datastore for that class and all its sub-classes only.  So,
+    if the Feline class had a property called 'whiskers', the Cat and Panther
+    entities would also have whiskers, but not Animal, Canine, Dog or Wolf.
+
+  Polymorphic queries:
+
+    When written to the datastore, all polymorphic objects automatically have
+    a property called 'class' that you can query against.  Using this property
+    it is possible to easily write a GQL query against any sub-hierarchy.  For
+    example, to fetch only Canine objects, including all Dogs and Wolves:
+
+      db.GqlQuery("SELECT * FROM Animal WHERE class='Canine'")
+
+    An alternative method is to use the 'all' or 'gql' methods of the Canine
+    class:
+
+      Canine.all()
+      Canine.gql('')
+
+    The 'class' property is not meant to be used by your code other than
+    for queries.  Since it is supposed to represent the real Python class,
+    it is intended to be hidden from view.
+
+  Root class:
+
+    The root class is the class from which all other classes of the hierarchy
+    inherit.  Each hierarchy has a single root class.  A class is a
+    root class if it is an immediate child of PolyModel.  The subclasses of
+    the root class are all the same kind as the root class. In other words:
+
+      Animal.kind() == Feline.kind() == Panther.kind() == 'Animal'
+  """
+
+  __metaclass__ = PolymorphicClass
+
+  _class = _ClassKeyProperty(name=_CLASS_KEY_PROPERTY)
+
+  def __new__(cls, *args, **kwds):
+    """Prevents direct instantiation of PolyModel."""
+    if cls is PolyModel:
+      raise NotImplementedError()
+    return super(PolyModel, cls).__new__(cls, *args, **kwds)
+
+  @classmethod
+  def kind(cls):
+    """Get kind of polymorphic model.
+
+    Overridden so that all subclasses of root classes are the same kind
+    as the root.
+
+    Returns:
+      Kind of entity to write to datastore.
+    """
+    if cls is cls.__root_class__:
+      return super(PolyModel, cls).kind()
+    else:
+      return cls.__root_class__.kind()
+
+  @classmethod
+  def class_key(cls):
+    """Caclulate the class-key for this class.
+
+    Returns:
+      Class key for class.  By default this is the tuple of class names
+      of the hierarchy, starting with the root class and walking its way
+      down to cls.
+    """
+    if not hasattr(cls, '__class_hierarchy__'):
+      raise NotImplementedError(
+          'Cannot determine class key without class hierarchy')
+    return tuple(cls.class_name() for cls in cls.__class_hierarchy__)
+
+  @classmethod
+  def class_name(cls):
+    """Calculate class name for this class.
+
+    Returns the name to use for this class's element within its class-key.  Used
+    to discriminate between different classes within a class hierarchy's
+    Datastore kind.
+
+    The presence of this method allows developers to use a different class
+    name in the datastore from what is used in Python code.  This is useful,
+    for example, for renaming classes without having to migrate instances
+    already written to the datastore.  For example, to rename a polymorphic
+    class Contact to SimpleContact, you could convert:
+
+      # Class key is ['Information']
+      class Information(PolyModel): ...
+
+      # Class key is ['Information', 'Contact']
+      class Contact(Information): ...
+
+    to:
+
+      # Class key is still ['Information', 'Contact']
+      class SimpleContact(Information):
+        ...
+        @classmethod
+        def class_name(cls):
+          return 'Contact'
+
+      # Class key is ['Information', 'Contact', 'ExtendedContact']
+      class ExtendedContact(SimpleContact): ...
+
+    This would ensure that all objects written previously using the old class
+    name would still be loaded.
+
+    Returns:
+      Name of this class.
+    """
+    return cls.__name__
+
+  @classmethod
+  def from_entity(cls, entity):
+    """Load from entity to class based on discriminator.
+
+    Rather than instantiating a new Model instance based on the kind
+    mapping, this creates an instance of the correct model class based
+    on the entity's class-key.
+
+    Args:
+      entity: Entity loaded directly from datastore.
+
+    Raises:
+      KindError when there is no class mapping based on discriminator.
+    """
+    if (_CLASS_KEY_PROPERTY in entity and
+        tuple(entity[_CLASS_KEY_PROPERTY]) != cls.class_key()):
+      key = tuple(entity[_CLASS_KEY_PROPERTY])
+      try:
+        poly_class = _class_map[key]
+      except KeyError:
+        raise db.KindError('No implementation for class \'%s\'' % key)
+      return poly_class.from_entity(entity)
+    return super(PolyModel, cls).from_entity(entity)
+
+  @classmethod
+  def all(cls):
+    """Get all instance of a class hierarchy.
+
+    Returns:
+      Query with filter set to match this class' discriminator.
+    """
+    query = super(PolyModel, cls).all()
+    if cls != cls.__root_class__:
+      query.filter(_CLASS_KEY_PROPERTY + ' =', cls.class_name())
+    return query
+
+  @classmethod
+  def gql(cls, query_string, *args, **kwds):
+    """Returns a polymorphic query using GQL query string.
+
+    This query is polymorphic in that its filters are configured in a way
+    to retrieve instances of the model or instances of any subclass of the
+    model.
+
+    Args:
+      query_string: properly formatted GQL query string with the
+        'SELECT * FROM <entity>' part omitted
+      *args: rest of the positional arguments used to bind numeric references
+        in the query.
+      **kwds: dictionary-based arguments (for named parameters).
+    """
+    if cls == cls.__root_class__:
+      return super(PolyModel, cls).gql(query_string, *args, **kwds)
+    else:
+      from google.appengine.ext import gql
+
+      query = db.GqlQuery('SELECT * FROM %s %s' % (cls.kind(), query_string))
+
+      query_filter = [('nop',
+                       [gql.Literal(cls.class_name())])]
+      query._proto_query.filters()[('class', '=')] = query_filter
+      query.bind(*args, **kwds)
+      return query
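Putting the pieces together, a minimal hierarchy matching the docstring's example (names illustrative):

    from google.appengine.ext import db
    from google.appengine.ext.db import polymodel

    class Animal(polymodel.PolyModel):
      name = db.StringProperty()

    class Canine(Animal):
      pass

    class Dog(Canine):
      pass

    Dog(name='Fido').put()            # stored under kind 'Animal'
    canines = Canine.all().fetch(10)  # matches Canine and subclasses like Dog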
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/remote_api/__init__.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,398 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""An apiproxy stub that calls a remote handler via HTTP.
+
+This allows easy remote access to the App Engine datastore, and potentially any
+of the other App Engine APIs, using the same interface you use when accessing
+the service locally.
+
+An example Python script:
+---
+from google.appengine.ext import db
+from google.appengine.ext import remote_api
+from myapp import models
+import getpass
+
+def auth_func():
+  return (raw_input('Username:'), getpass.getpass('Password:'))
+
+remote_api.ConfigureRemoteDatastore('my-app', '/remote_api', auth_func)
+
+# Now you can access the remote datastore just as if your code was running on
+# App Engine!
+
+houses = models.House.all().fetch(100)
+for a_house in houses:
+  a_house.doors += 1
+db.put(houses)
+---
+
+A few caveats:
+- Where possible, avoid iterating over queries directly. Fetching as many
+  results as you will need is faster and more efficient.
+- If you need to iterate, consider instead fetching items in batches with a
+  sort order and constructing a new query starting from where the previous one
+  left off (see the sketch after this module). The __key__ pseudo-property can
+  be used as a sort key for this purpose, and does not even require a custom
+  index if you are iterating over all entities of a given type.
+- Likewise, it's a good idea to put entities in batches. Instead of calling put
+  for each individual entity, accumulate them and put them in batches using
+  db.put(), if you can.
+- Requests and responses are still limited to 1MB each, so if you have large
+  entities or try to fetch or put many of them at once, your requests may fail.
+"""
+
+
+
+
+
+import os
+import pickle
+import sha
+import sys
+import thread
+import threading
+from google.appengine.api import apiproxy_stub_map
+from google.appengine.datastore import datastore_pb
+from google.appengine.ext.remote_api import remote_api_pb
+from google.appengine.runtime import apiproxy_errors
+from google.appengine.tools import appengine_rpc
+
+
+def GetUserAgent():
+  """Determines the value of the 'User-agent' header to use for HTTP requests.
+
+  Returns:
+    String containing the 'user-agent' header value, which includes the SDK
+    version, the platform information, and the version of Python;
+    e.g., "remote_api/1.0.1 Darwin/9.2.0 Python/2.5.2".
+  """
+  product_tokens = []
+
+  product_tokens.append("Google-remote_api/1.0")
+
+  product_tokens.append(appengine_rpc.GetPlatformToken())
+
+  python_version = ".".join(str(i) for i in sys.version_info)
+  product_tokens.append("Python/%s" % python_version)
+
+  return " ".join(product_tokens)
+
+
+def GetSourceName():
+  return "Google-remote_api-1.0"
+
+
+class TransactionData(object):
+  """Encapsulates data about an individual transaction."""
+
+  def __init__(self, thread_id):
+    self.thread_id = thread_id
+    self.preconditions = {}
+    self.entities = {}
+
+
+class RemoteStub(object):
+  """A stub for calling services on a remote server over HTTP.
+
+  You can use this to stub out any service that the remote server supports.
+  """
+
+  def __init__(self, server, path):
+    """Constructs a new RemoteStub that communicates with the specified server.
+
+    Args:
+      server: An instance of a subclass of
+        google.appengine.tools.appengine_rpc.AbstractRpcServer.
+      path: The path to the handler this stub should send requests to.
+    """
+    self._server = server
+    self._path = path
+
+  def MakeSyncCall(self, service, call, request, response):
+    request_pb = remote_api_pb.Request()
+    request_pb.set_service_name(service)
+    request_pb.set_method(call)
+    request_pb.mutable_request().set_contents(request.Encode())
+
+    response_pb = remote_api_pb.Response()
+    response_pb.ParseFromString(self._server.Send(self._path,
+                                                  request_pb.Encode()))
+
+    if response_pb.has_exception():
+      raise pickle.loads(response_pb.exception().contents())
+    else:
+      response.ParseFromString(response_pb.response().contents())
+
+
+class RemoteDatastoreStub(RemoteStub):
+  """A specialised stub for accessing the App Engine datastore remotely.
+
+  A specialised stub is required because there are some datastore operations
+  that preserve state between calls. This stub makes queries possible.
+  Transactions on the remote datastore are unfortunately still impossible.
+  """
+
+  def __init__(self, server, path):
+    super(RemoteDatastoreStub, self).__init__(server, path)
+    self.__queries = {}
+    self.__transactions = {}
+
+    self.__next_local_cursor = 1
+    self.__local_cursor_lock = threading.Lock()
+    self.__next_local_tx = 1
+    self.__local_tx_lock = threading.Lock()
+
+  def MakeSyncCall(self, service, call, request, response):
+    assert service == 'datastore_v3'
+
+    explanation = []
+    assert request.IsInitialized(explanation), explanation
+
+    handler = getattr(self, '_Dynamic_' + call, None)
+    if handler:
+      handler(request, response)
+    else:
+      super(RemoteDatastoreStub, self).MakeSyncCall(service, call, request,
+                                                    response)
+
+    assert response.IsInitialized(explanation), explanation
+
+  def _Dynamic_RunQuery(self, query, query_result):
+    self.__local_cursor_lock.acquire()
+    try:
+      cursor_id = self.__next_local_cursor
+      self.__next_local_cursor += 1
+    finally:
+      self.__local_cursor_lock.release()
+    self.__queries[cursor_id] = query
+
+    query_result.mutable_cursor().set_cursor(cursor_id)
+    query_result.set_more_results(True)
+
+  def _Dynamic_Next(self, next_request, query_result):
+    cursor = next_request.cursor().cursor()
+    if cursor not in self.__queries:
+      raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
+                                             'Cursor %d not found' % cursor)
+    query = self.__queries[cursor]
+
+    if query is None:
+      query_result.set_more_results(False)
+      return
+
+    request = datastore_pb.Query()
+    request.CopyFrom(query)
+    if request.has_limit():
+      request.set_limit(min(request.limit(), next_request.count()))
+    else:
+      request.set_limit(next_request.count())
+
+    super(RemoteDatastoreStub, self).MakeSyncCall(
+        'remote_datastore', 'RunQuery', request, query_result)
+
+    query.set_offset(query.offset() + query_result.result_size())
+    if query.has_limit():
+      query.set_limit(query.limit() - query_result.result_size())
+    if not query_result.more_results():
+      self.__queries[cursor] = None
+
+  def _Dynamic_Get(self, get_request, get_response):
+    txid = None
+    if get_request.has_transaction():
+      txid = get_request.transaction().handle()
+      txdata = self.__transactions[txid]
+      assert txdata.thread_id == thread.get_ident(), \
+             "Transactions are single-threaded."
+
+      keys = [(k, k.Encode()) for k in get_request.key_list()]
+
+      new_request = datastore_pb.GetRequest()
+      for key, enckey in keys:
+        if enckey not in txdata.entities:
+          new_request.add_key().CopyFrom(key)
+    else:
+      new_request = get_request
+
+    if new_request.key_size() > 0:
+      super(RemoteDatastoreStub, self).MakeSyncCall(
+          'datastore_v3', 'Get', new_request, get_response)
+
+    if txid is not None:
+      newkeys = new_request.key_list()
+      entities = get_response.entity_list()
+      for key, entity in zip(newkeys, entities):
+        entity_hash = None
+        if entity.has_entity():
+          entity_hash = sha.new(entity.entity().Encode()).digest()
+        txdata.preconditions[key.Encode()] = (key, entity_hash)
+
+      new_response = datastore_pb.GetResponse()
+      it = iter(get_response.entity_list())
+      for key, enckey in keys:
+        if enckey in txdata.entities:
+          cached_entity = txdata.entities[enckey][1]
+          if cached_entity:
+            new_response.add_entity().mutable_entity().CopyFrom(cached_entity)
+          else:
+            new_response.add_entity()
+        else:
+          new_entity = it.next()
+          if new_entity.has_entity():
+            assert new_entity.entity().key() == key
+            new_response.add_entity().CopyFrom(new_entity)
+          else:
+            new_response.add_entity()
+      get_response.CopyFrom(new_response)
+
+  def _Dynamic_Put(self, put_request, put_response):
+    if put_request.has_transaction():
+      entities = put_request.entity_list()
+
+      requires_id = lambda x: x.id() == 0 and not x.has_name()
+      new_ents = [e for e in entities
+                  if requires_id(e.key().path().element_list()[-1])]
+      id_request = remote_api_pb.PutRequest()
+      if new_ents:
+        for ent in new_ents:
+          e = id_request.add_entity()
+          e.mutable_key().CopyFrom(ent.key())
+          e.mutable_entity_group()
+        id_response = datastore_pb.PutResponse()
+        super(RemoteDatastoreStub, self).MakeSyncCall(
+            'remote_datastore', 'GetIDs', id_request, id_response)
+        assert id_request.entity_size() == id_response.key_size()
+        for key, ent in zip(id_response.key_list(), new_ents):
+          ent.mutable_key().CopyFrom(key)
+          ent.mutable_entity_group().add_element().CopyFrom(
+              key.path().element(0))
+
+      txid = put_request.transaction().handle()
+      txdata = self.__transactions[txid]
+      assert txdata.thread_id == thread.get_ident(), \
+             "Transactions are single-threaded."
+      for entity in entities:
+        txdata.entities[entity.key().Encode()] = (entity.key(), entity)
+        put_response.add_key().CopyFrom(entity.key())
+    else:
+      super(RemoteDatastoreStub, self).MakeSyncCall(
+          'datastore_v3', 'Put', put_request, put_response)
+
+  def _Dynamic_Delete(self, delete_request, response):
+    if delete_request.has_transaction():
+      txid = delete_request.transaction().handle()
+      txdata = self.__transactions[txid]
+      assert txdata.thread_id == thread.get_ident(), \
+             "Transactions are single-threaded."
+      for key in delete_request.key_list():
+        txdata.entities[key.Encode()] = (key, None)
+    else:
+      super(RemoteDatastoreStub, self).MakeSyncCall(
+          'datastore_v3', 'Delete', delete_request, response)
+
+  def _Dynamic_BeginTransaction(self, request, transaction):
+    self.__local_tx_lock.acquire()
+    try:
+      txid = self.__next_local_tx
+      self.__transactions[txid] = TransactionData(thread.get_ident())
+      self.__next_local_tx += 1
+    finally:
+      self.__local_tx_lock.release()
+    transaction.set_handle(txid)
+
+  def _Dynamic_Commit(self, transaction, transaction_response):
+    txid = transaction.handle()
+    if txid not in self.__transactions:
+      raise apiproxy_errors.ApplicationError(
+          datastore_pb.Error.BAD_REQUEST,
+          'Transaction %d not found.' % (txid,))
+
+    txdata = self.__transactions[txid]
+    assert txdata.thread_id == thread.get_ident(), \
+           "Transactions are single-threaded."
+    del self.__transactions[txid]
+
+    tx = remote_api_pb.TransactionRequest()
+    for key, hash in txdata.preconditions.values():
+      precond = tx.add_precondition()
+      precond.mutable_key().CopyFrom(key)
+      if hash:
+        precond.set_hash(hash)
+
+    puts = tx.mutable_puts()
+    deletes = tx.mutable_deletes()
+    for key, entity in txdata.entities.values():
+      if entity:
+        puts.add_entity().CopyFrom(entity)
+      else:
+        deletes.add_key().CopyFrom(key)
+
+    super(RemoteDatastoreStub, self).MakeSyncCall(
+        'remote_datastore', 'Transaction',
+        tx, datastore_pb.PutResponse())
+
+  def _Dynamic_Rollback(self, transaction, transaction_response):
+    txid = transaction.handle()
+    self.__local_tx_lock.acquire()
+    try:
+      if txid not in self.__transactions:
+        raise apiproxy_errors.ApplicationError(
+            datastore_pb.Error.BAD_REQUEST,
+            'Transaction %d not found.' % (txid,))
+
+      assert self.__transactions[txid].thread_id == thread.get_ident(), \
+             "Transactions are single-threaded."
+      del self.__transactions[txid]
+    finally:
+      self.__local_tx_lock.release()
+
+  def _Dynamic_CreateIndex(self, index, id_response):
+    raise apiproxy_errors.CapabilityDisabledError(
+        'The remote datastore does not support index manipulation.')
+
+  def _Dynamic_UpdateIndex(self, index, void):
+    raise apiproxy_errors.CapabilityDisabledError(
+        'The remote datastore does not support index manipulation.')
+
+  def _Dynamic_DeleteIndex(self, index, void):
+    raise apiproxy_errors.CapabilityDisabledError(
+        'The remote datastore does not support index manipulation.')
+
+
+def ConfigureRemoteDatastore(app_id, path, auth_func, servername=None):
+  """Does necessary setup to allow easy remote access to an AppEngine datastore.
+
+  Args:
+    app_id: The app_id of your app, as declared in app.yaml.
+    path: The path to the remote_api handler for your app
+      (for example, '/remote_api').
+    auth_func: A function that takes no arguments and returns a
+      (username, password) tuple. This will be called if your application
+      requires authentication to access the remote_api handler (it should!)
+      and you do not already have a valid auth cookie.
+    servername: The hostname your app is deployed on. Defaults to
+      <app_id>.appspot.com.
+  """
+  if not servername:
+    servername = '%s.appspot.com' % (app_id,)
+  os.environ['APPLICATION_ID'] = app_id
+  apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
+  server = appengine_rpc.HttpRpcServer(servername, auth_func, GetUserAgent(),
+                                       GetSourceName())
+  stub = RemoteDatastoreStub(server, path)
+  apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', stub)
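The batched-iteration pattern recommended in the caveats above, sketched with an illustrative model; that this release accepts '__key__' in inequality filters as well as in sort orders is an assumption:

    from google.appengine.ext import db

    class House(db.Model):   # illustrative model
      doors = db.IntegerProperty()

    batch = House.all().order('__key__').fetch(100)
    while batch:
      for a_house in batch:
        a_house.doors += 1
      db.put(batch)                   # write back one batch at a time
      batch = (House.all().order('__key__')
               .filter('__key__ >', batch[-1].key()).fetch(100))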
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/remote_api/handler.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,259 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""A handler that exports various App Engine services over HTTP.
+
+You can export this handler in your app by adding it directly to app.yaml's
+list of handlers:
+
+  handlers:
+  - url: /remote_api
+    script: $PYTHON_LIB/google/appengine/ext/remote_api/handler.py
+    login: admin
+
+Then, you can use the remote_api module to remotely access services exported by
+this handler. See the documentation in remote_api's __init__.py for details on
+how to do this.
+
+Using this handler without specifying "login: admin" would be extremely unwise.
+So unwise that the default handler insists on checking for itself.
+"""
+
+
+
+
+
+import google
+import pickle
+import sha
+import wsgiref.handlers
+from google.appengine.api import api_base_pb
+from google.appengine.api import apiproxy_stub
+from google.appengine.api import apiproxy_stub_map
+from google.appengine.api import users
+from google.appengine.datastore import datastore_pb
+from google.appengine.ext import webapp
+from google.appengine.ext.remote_api import remote_api_pb
+from google.appengine.runtime import apiproxy_errors
+
+
+class RemoteDatastoreStub(apiproxy_stub.APIProxyStub):
+  """Provides a stub that permits execution of stateful datastore queries.
+
+  Some operations aren't possible using the standard interface. Notably,
+  datastore RunQuery operations internally store a cursor that is referenced in
+  later Next calls, and cleaned up at the end of each request. Because every
+  call to ApiCallHandler takes place in its own request, this isn't possible.
+
+  To work around this, RemoteDatastoreStub provides its own implementation of
+  RunQuery that immediately returns the query results.
+  """
+
+  def _Dynamic_RunQuery(self, request, response):
+    """Handle a RunQuery request.
+
+    We handle RunQuery by executing a Query and a Next and returning the result
+    of the Next request.
+    """
+    runquery_response = datastore_pb.QueryResult()
+    apiproxy_stub_map.MakeSyncCall('datastore_v3', 'RunQuery',
+                                   request, runquery_response)
+    next_request = datastore_pb.NextRequest()
+    next_request.mutable_cursor().CopyFrom(runquery_response.cursor())
+    next_request.set_count(request.limit())
+    apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Next',
+                                   next_request, response)
+
+  def _Dynamic_Transaction(self, request, response):
+    """Handle a Transaction request.
+
+    We handle transactions by accumulating Put requests on the client end, as
+    well as recording the key and hash of Get requests. When Commit is called,
+    Transaction is invoked, which verifies that all the entities in the
+    precondition list still exist and their hashes match, then performs a
+    transaction of its own to make the updates.
+    """
+    tx = datastore_pb.Transaction()
+    apiproxy_stub_map.MakeSyncCall('datastore_v3', 'BeginTransaction',
+                                   api_base_pb.VoidProto(), tx)
+
+    preconditions = request.precondition_list()
+    if preconditions:
+      get_request = datastore_pb.GetRequest()
+      get_request.mutable_transaction().CopyFrom(tx)
+      for precondition in preconditions:
+        key = get_request.add_key()
+        key.CopyFrom(precondition.key())
+      get_response = datastore_pb.GetResponse()
+      apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Get', get_request,
+                                     get_response)
+      entities = get_response.entity_list()
+      assert len(entities) == request.precondition_size()
+      for precondition, entity in zip(preconditions, entities):
+        if precondition.has_hash() != entity.has_entity():
+          raise apiproxy_errors.ApplicationError(
+              datastore_pb.Error.CONCURRENT_TRANSACTION,
+              "Transaction precondition failed.")
+        elif entity.has_entity():
+          entity_hash = sha.new(entity.entity().Encode()).digest()
+          if precondition.hash() != entity_hash:
+            raise apiproxy_errors.ApplicationError(
+                datastore_pb.Error.CONCURRENT_TRANSACTION,
+                "Transaction precondition failed.")
+
+    if request.has_puts():
+      put_request = request.puts()
+      put_request.mutable_transaction().CopyFrom(tx)
+      apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Put',
+                                     put_request, response)
+
+    if request.has_deletes():
+      delete_request = request.deletes()
+      delete_request.mutable_transaction().CopyFrom(tx)
+      apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Delete',
+                                     delete_request, api_base_pb.VoidProto())
+
+    apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Commit', tx,
+                                   api_base_pb.VoidProto())
+
+  def _Dynamic_GetIDs(self, request, response):
+    """Fetch unique IDs for a set of paths."""
+    for entity in request.entity_list():
+      assert entity.property_size() == 0
+      assert entity.raw_property_size() == 0
+      assert entity.entity_group().element_size() == 0
+      lastpart = entity.key().path().element_list()[-1]
+      assert lastpart.id() == 0 and not lastpart.has_name()
+
+    tx = datastore_pb.Transaction()
+    apiproxy_stub_map.MakeSyncCall('datastore_v3', 'BeginTransaction',
+                                   api_base_pb.VoidProto(), tx)
+
+    apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Put', request, response)
+
+    apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Rollback', tx,
+                                   api_base_pb.VoidProto())
+
+
+SERVICE_PB_MAP = {
+    'datastore_v3': {
+        'Get': (datastore_pb.GetRequest, datastore_pb.GetResponse),
+        'Put': (datastore_pb.PutRequest, datastore_pb.PutResponse),
+        'Delete': (datastore_pb.DeleteRequest, datastore_pb.DeleteResponse),
+        'Count': (datastore_pb.Query, api_base_pb.Integer64Proto),
+        'GetIndices': (api_base_pb.StringProto, datastore_pb.CompositeIndices),
+    },
+    'remote_datastore': {
+        'RunQuery': (datastore_pb.Query, datastore_pb.QueryResult),
+        'Transaction': (remote_api_pb.TransactionRequest,
+                             datastore_pb.PutResponse),
+        'GetIDs': (remote_api_pb.PutRequest, datastore_pb.PutResponse),
+    },
+}
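+
+# Illustrative sketch only (not part of the SDK): a client wraps the
+# method-specific protocol buffer in a remote_api_pb.Request before POSTing
+# it to this handler, e.g. for a query:
+#
+#   query = datastore_pb.Query()             # app id, kind, filters, ...
+#   wrapped = remote_api_pb.Request()
+#   wrapped.set_service_name('remote_datastore')
+#   wrapped.set_method('RunQuery')
+#   wrapped.mutable_request().set_contents(query.Encode())
+#   payload = wrapped.Encode()               # body of the HTTP POST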
+
+
+class ApiCallHandler(webapp.RequestHandler):
+  """A webapp handler that accepts API calls over HTTP and executes them."""
+
+  LOCAL_STUBS = {
+      'remote_datastore': RemoteDatastoreStub('remote_datastore'),
+  }
+
+  def CheckIsAdmin(self):
+    if not users.is_current_user_admin():
+      self.response.set_status(401)
+      self.response.headers['Content-Type'] = 'text/plain'
+      self.response.out.write(
+          "You must be logged in as an administrator to access this.")
+      return False
+    elif 'X-appcfg-api-version' not in self.request.headers:
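+      # A plain HTML form cannot attach this custom header, so requiring it
+      # here presumably doubles as a simple cross-site request forgery guard.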
+      self.response.set_status(403)
+      self.response.out.write("This request did not contain a necessary header")
+      return False
+    return True
+
+  def get(self):
+    """Handle a GET. Just show an info page."""
+    if not self.CheckIsAdmin():
+      return
+
+    page = self.InfoPage()
+    self.response.out.write(page)
+
+  def post(self):
+    """Handle POST requests by executing the API call."""
+    if not self.CheckIsAdmin():
+      return
+
+    self.response.headers['Content-Type'] = 'application/octet-stream'
+    response = remote_api_pb.Response()
+    try:
+      request = remote_api_pb.Request()
+      request.ParseFromString(self.request.body)
+      response_data = self.ExecuteRequest(request)
+      response.mutable_response().set_contents(response_data.Encode())
+      self.response.set_status(200)
+    except Exception, e:
+      self.response.set_status(500)
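+      # Ship the failure back as a pickled exception; the caller's stub is
+      # expected to unpickle and re-raise it locally.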
+      response.mutable_exception().set_contents(pickle.dumps(e))
+    self.response.out.write(response.Encode())
+
+  def ExecuteRequest(self, request):
+    """Executes an API invocation and returns the response object."""
+    service = request.service_name()
+    method = request.method()
+    service_methods = SERVICE_PB_MAP.get(service, {})
+    request_class, response_class = service_methods.get(method, (None, None))
+    if not request_class:
+      raise apiproxy_errors.CallNotFoundError()
+
+    request_data = request_class()
+    request_data.ParseFromString(request.request().contents())
+    response_data = response_class()
+
+    if service in self.LOCAL_STUBS:
+      self.LOCAL_STUBS[service].MakeSyncCall(service, method, request_data,
+                                             response_data)
+    else:
+      apiproxy_stub_map.MakeSyncCall(service, method, request_data,
+                                     response_data)
+
+    return response_data
+
+  def InfoPage(self):
+    """Renders an information page."""
+    return """
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html><head>
+<title>App Engine API endpoint.</title>
+</head><body>
+<h1>App Engine API endpoint.</h1>
+<p>This is an endpoint for the App Engine remote API interface.
+Point your stubs (google.appengine.ext.remote_api.remote_api_stub) here.</p>
+</body>
+</html>"""
+
+
+def main():
+  application = webapp.WSGIApplication([('.*', ApiCallHandler)])
+  wsgiref.handlers.CGIHandler().run(application)
+
+
+if __name__ == '__main__':
+  main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_pb.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,600 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.net.proto import ProtocolBuffer
+import array
+import dummy_thread as thread
+
+__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
+                   unusednames=printElemNumber,debug_strs no-special"""
+
+from google.net.proto.RawMessage import RawMessage
+from google.appengine.datastore.datastore_pb import PutRequest
+from google.appengine.datastore.datastore_pb import DeleteRequest
+from google.appengine.datastore.entity_pb import Reference
+class Request(ProtocolBuffer.ProtocolMessage):
+  has_service_name_ = 0
+  service_name_ = ""
+  has_method_ = 0
+  method_ = ""
+  has_request_ = 0
+
+  def __init__(self, contents=None):
+    self.request_ = RawMessage()
+    if contents is not None: self.MergeFromString(contents)
+
+  def service_name(self): return self.service_name_
+
+  def set_service_name(self, x):
+    self.has_service_name_ = 1
+    self.service_name_ = x
+
+  def clear_service_name(self):
+    self.has_service_name_ = 0
+    self.service_name_ = ""
+
+  def has_service_name(self): return self.has_service_name_
+
+  def method(self): return self.method_
+
+  def set_method(self, x):
+    self.has_method_ = 1
+    self.method_ = x
+
+  def clear_method(self):
+    self.has_method_ = 0
+    self.method_ = ""
+
+  def has_method(self): return self.has_method_
+
+  def request(self): return self.request_
+
+  def mutable_request(self): self.has_request_ = 1; return self.request_
+
+  def clear_request(self):self.has_request_ = 0; self.request_.Clear()
+
+  def has_request(self): return self.has_request_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_service_name()): self.set_service_name(x.service_name())
+    if (x.has_method()): self.set_method(x.method())
+    if (x.has_request()): self.mutable_request().MergeFrom(x.request())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_service_name_ != x.has_service_name_: return 0
+    if self.has_service_name_ and self.service_name_ != x.service_name_: return 0
+    if self.has_method_ != x.has_method_: return 0
+    if self.has_method_ and self.method_ != x.method_: return 0
+    if self.has_request_ != x.has_request_: return 0
+    if self.has_request_ and self.request_ != x.request_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_service_name_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: service_name not set.')
+    if (not self.has_method_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: method not set.')
+    if (not self.has_request_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: request not set.')
+    elif not self.request_.IsInitialized(debug_strs): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.service_name_))
+    n += self.lengthString(len(self.method_))
+    n += self.lengthString(self.request_.ByteSize())
+    return n + 3
+
+  def Clear(self):
+    self.clear_service_name()
+    self.clear_method()
+    self.clear_request()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(18)
+    out.putPrefixedString(self.service_name_)
+    out.putVarInt32(26)
+    out.putPrefixedString(self.method_)
+    out.putVarInt32(34)
+    out.putVarInt32(self.request_.ByteSize())
+    self.request_.OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 18:
+        self.set_service_name(d.getPrefixedString())
+        continue
+      if tt == 26:
+        self.set_method(d.getPrefixedString())
+        continue
+      if tt == 34:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_request().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_service_name_: res+=prefix+("service_name: %s\n" % self.DebugFormatString(self.service_name_))
+    if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatString(self.method_))
+    if self.has_request_:
+      res+=prefix+"request <\n"
+      res+=self.request_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+  kservice_name = 2
+  kmethod = 3
+  krequest = 4
+
+  _TEXT = (
+   "ErrorCode",
+   None,
+   "service_name",
+   "method",
+   "request",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.MAX_TYPE,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class Response(ProtocolBuffer.ProtocolMessage):
+  has_response_ = 0
+  response_ = None
+  has_exception_ = 0
+  exception_ = None
+
+  def __init__(self, contents=None):
+    self.lazy_init_lock_ = thread.allocate_lock()
+    if contents is not None: self.MergeFromString(contents)
+
+  def response(self):
+    if self.response_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.response_ is None: self.response_ = RawMessage()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.response_
+
+  def mutable_response(self): self.has_response_ = 1; return self.response()
+
+  def clear_response(self):
+    self.has_response_ = 0;
+    if self.response_ is not None: self.response_.Clear()
+
+  def has_response(self): return self.has_response_
+
+  def exception(self):
+    if self.exception_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.exception_ is None: self.exception_ = RawMessage()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.exception_
+
+  def mutable_exception(self): self.has_exception_ = 1; return self.exception()
+
+  def clear_exception(self):
+    self.has_exception_ = 0;
+    if self.exception_ is not None: self.exception_.Clear()
+
+  def has_exception(self): return self.has_exception_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_response()): self.mutable_response().MergeFrom(x.response())
+    if (x.has_exception()): self.mutable_exception().MergeFrom(x.exception())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_response_ != x.has_response_: return 0
+    if self.has_response_ and self.response_ != x.response_: return 0
+    if self.has_exception_ != x.has_exception_: return 0
+    if self.has_exception_ and self.exception_ != x.exception_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (self.has_response_ and not self.response_.IsInitialized(debug_strs)): initialized = 0
+    if (self.has_exception_ and not self.exception_.IsInitialized(debug_strs)): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    if (self.has_response_): n += 1 + self.lengthString(self.response_.ByteSize())
+    if (self.has_exception_): n += 1 + self.lengthString(self.exception_.ByteSize())
+    return n + 0
+
+  def Clear(self):
+    self.clear_response()
+    self.clear_exception()
+
+  def OutputUnchecked(self, out):
+    if (self.has_response_):
+      out.putVarInt32(10)
+      out.putVarInt32(self.response_.ByteSize())
+      self.response_.OutputUnchecked(out)
+    if (self.has_exception_):
+      out.putVarInt32(18)
+      out.putVarInt32(self.exception_.ByteSize())
+      self.exception_.OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_response().TryMerge(tmp)
+        continue
+      if tt == 18:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_exception().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_response_:
+      res+=prefix+"response <\n"
+      res+=self.response_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    if self.has_exception_:
+      res+=prefix+"exception <\n"
+      res+=self.exception_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+  kresponse = 1
+  kexception = 2
+
+  _TEXT = (
+   "ErrorCode",
+   "response",
+   "exception",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class TransactionRequest_Precondition(ProtocolBuffer.ProtocolMessage):
+  has_key_ = 0
+  has_hash_ = 0
+  hash_ = ""
+
+  def __init__(self, contents=None):
+    self.key_ = Reference()
+    if contents is not None: self.MergeFromString(contents)
+
+  def key(self): return self.key_
+
+  def mutable_key(self): self.has_key_ = 1; return self.key_
+
+  def clear_key(self):self.has_key_ = 0; self.key_.Clear()
+
+  def has_key(self): return self.has_key_
+
+  def hash(self): return self.hash_
+
+  def set_hash(self, x):
+    self.has_hash_ = 1
+    self.hash_ = x
+
+  def clear_hash(self):
+    self.has_hash_ = 0
+    self.hash_ = ""
+
+  def has_hash(self): return self.has_hash_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_key()): self.mutable_key().MergeFrom(x.key())
+    if (x.has_hash()): self.set_hash(x.hash())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_key_ != x.has_key_: return 0
+    if self.has_key_ and self.key_ != x.key_: return 0
+    if self.has_hash_ != x.has_hash_: return 0
+    if self.has_hash_ and self.hash_ != x.hash_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_key_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: key not set.')
+    elif not self.key_.IsInitialized(debug_strs): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(self.key_.ByteSize())
+    if (self.has_hash_): n += 1 + self.lengthString(len(self.hash_))
+    return n + 1
+
+  def Clear(self):
+    self.clear_key()
+    self.clear_hash()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(18)
+    out.putVarInt32(self.key_.ByteSize())
+    self.key_.OutputUnchecked(out)
+    if (self.has_hash_):
+      out.putVarInt32(26)
+      out.putPrefixedString(self.hash_)
+
+  def TryMerge(self, d):
+    while 1:
+      tt = d.getVarInt32()
+      if tt == 12: break
+      if tt == 18:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_key().TryMerge(tmp)
+        continue
+      if tt == 26:
+        self.set_hash(d.getPrefixedString())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_key_:
+      res+=prefix+"key <\n"
+      res+=self.key_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    if self.has_hash_: res+=prefix+("hash: %s\n" % self.DebugFormatString(self.hash_))
+    return res
+
+class TransactionRequest(ProtocolBuffer.ProtocolMessage):
+  has_puts_ = 0
+  puts_ = None
+  has_deletes_ = 0
+  deletes_ = None
+
+  def __init__(self, contents=None):
+    self.precondition_ = []
+    self.lazy_init_lock_ = thread.allocate_lock()
+    if contents is not None: self.MergeFromString(contents)
+
+  def precondition_size(self): return len(self.precondition_)
+  def precondition_list(self): return self.precondition_
+
+  def precondition(self, i):
+    return self.precondition_[i]
+
+  def mutable_precondition(self, i):
+    return self.precondition_[i]
+
+  def add_precondition(self):
+    x = TransactionRequest_Precondition()
+    self.precondition_.append(x)
+    return x
+
+  def clear_precondition(self):
+    self.precondition_ = []
+  def puts(self):
+    if self.puts_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.puts_ is None: self.puts_ = PutRequest()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.puts_
+
+  def mutable_puts(self): self.has_puts_ = 1; return self.puts()
+
+  def clear_puts(self):
+    self.has_puts_ = 0;
+    if self.puts_ is not None: self.puts_.Clear()
+
+  def has_puts(self): return self.has_puts_
+
+  def deletes(self):
+    if self.deletes_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.deletes_ is None: self.deletes_ = DeleteRequest()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.deletes_
+
+  def mutable_deletes(self): self.has_deletes_ = 1; return self.deletes()
+
+  def clear_deletes(self):
+    self.has_deletes_ = 0;
+    if self.deletes_ is not None: self.deletes_.Clear()
+
+  def has_deletes(self): return self.has_deletes_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.precondition_size()): self.add_precondition().CopyFrom(x.precondition(i))
+    if (x.has_puts()): self.mutable_puts().MergeFrom(x.puts())
+    if (x.has_deletes()): self.mutable_deletes().MergeFrom(x.deletes())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.precondition_) != len(x.precondition_): return 0
+    for e1, e2 in zip(self.precondition_, x.precondition_):
+      if e1 != e2: return 0
+    if self.has_puts_ != x.has_puts_: return 0
+    if self.has_puts_ and self.puts_ != x.puts_: return 0
+    if self.has_deletes_ != x.has_deletes_: return 0
+    if self.has_deletes_ and self.deletes_ != x.deletes_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    for p in self.precondition_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    if (self.has_puts_ and not self.puts_.IsInitialized(debug_strs)): initialized = 0
+    if (self.has_deletes_ and not self.deletes_.IsInitialized(debug_strs)): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 2 * len(self.precondition_)
+    for i in xrange(len(self.precondition_)): n += self.precondition_[i].ByteSize()
+    if (self.has_puts_): n += 1 + self.lengthString(self.puts_.ByteSize())
+    if (self.has_deletes_): n += 1 + self.lengthString(self.deletes_.ByteSize())
+    return n + 0
+
+  def Clear(self):
+    self.clear_precondition()
+    self.clear_puts()
+    self.clear_deletes()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.precondition_)):
+      out.putVarInt32(11)
+      self.precondition_[i].OutputUnchecked(out)
+      out.putVarInt32(12)
+    if (self.has_puts_):
+      out.putVarInt32(34)
+      out.putVarInt32(self.puts_.ByteSize())
+      self.puts_.OutputUnchecked(out)
+    if (self.has_deletes_):
+      out.putVarInt32(42)
+      out.putVarInt32(self.deletes_.ByteSize())
+      self.deletes_.OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 11:
+        self.add_precondition().TryMerge(d)
+        continue
+      if tt == 34:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_puts().TryMerge(tmp)
+        continue
+      if tt == 42:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_deletes().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.precondition_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("Precondition%s {\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+"}\n"
+      cnt+=1
+    if self.has_puts_:
+      res+=prefix+"puts <\n"
+      res+=self.puts_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    if self.has_deletes_:
+      res+=prefix+"deletes <\n"
+      res+=self.deletes_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+  kPreconditionGroup = 1
+  kPreconditionkey = 2
+  kPreconditionhash = 3
+  kputs = 4
+  kdeletes = 5
+
+  _TEXT = (
+   "ErrorCode",
+   "Precondition",
+   "key",
+   "hash",
+   "puts",
+   "deletes",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STARTGROUP,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+
+__all__ = ['Request','Response','TransactionRequest','TransactionRequest_Precondition']
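+
+# Illustrative round trip (not part of the generated code): these messages
+# serialize and parse like any other ProtocolMessage, e.g.:
+#
+#   req = Request()
+#   req.set_service_name('datastore_v3')
+#   req.set_method('Get')
+#   req.mutable_request().set_contents(get_request.Encode())  # a GetRequest
+#   assert Request(req.Encode()).Equals(req)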
--- a/thirdparty/google_appengine/google/appengine/ext/webapp/__init__.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/webapp/__init__.py	Tue Jan 20 13:19:45 2009 +0000
@@ -379,12 +379,11 @@
       debug_mode: True if the web application is running in debug mode
     """
     self.error(500)
-    lines = ''.join(traceback.format_exception(*sys.exc_info()))
-    logging.error(lines)
+    logging.exception(exception)
     if debug_mode:
+      lines = ''.join(traceback.format_exception(*sys.exc_info()))
       self.response.clear()
-      self.response.headers['Content-Type'] = 'text/plain'
-      self.response.out.write(lines)
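+      # cgi.escape(..., quote=True) HTML-escapes &, <, > and double quotes,
+      # so the raw traceback renders literally inside the <pre> block.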
+      self.response.out.write('<pre>%s</pre>' % (cgi.escape(lines, quote=True)))
 
   @classmethod
   def get_url(cls, *args, **kargs):
--- a/thirdparty/google_appengine/google/appengine/runtime/apiproxy.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/runtime/apiproxy.py	Tue Jan 20 13:19:45 2009 +0000
@@ -26,8 +26,9 @@
 import sys
 from google.net.proto import ProtocolBuffer
 from google.appengine import runtime
+from google.appengine.api import apiproxy_rpc
+from google3.apphosting.runtime import _apphosting_runtime___python__apiproxy
 from google.appengine.runtime import apiproxy_errors
-from google3.apphosting.runtime import _apphosting_runtime___python__apiproxy
 
 OK                =  0
 RPC_FAILED        =  1
@@ -74,7 +75,7 @@
 
 }
 
-class RPC(object):
+class RPC(apiproxy_rpc.RPC):
   """A RPC object, suitable for talking to remote services.
 
   Each instance of this object can be used only once, and should not be reused.
@@ -82,73 +83,15 @@
   Stores the data members and methods for making RPC calls via the APIProxy.
   """
 
-  IDLE = 0
-  RUNNING = 1
-  FINISHING = 2
-
-  def __init__(self, package=None, call=None, request=None, response=None,
-               callback=None):
+  def __init__(self, *args, **kargs):
     """Constructor for the RPC object. All arguments are optional, and
     simply set members on the class. These data members will be
     overridden by values passed to MakeCall.
-
-    Args:
-      package: string, the package for the call
-      call: string, the call within the package
-      request: ProtocolMessage instance, appropriate for the arguments
-      response: ProtocolMessage instance, appropriate for the response
-      callback: callable, called when call is complete
     """
-    self.__exception = None
-    self.__traceback = None
+    super(RPC, self).__init__(*args, **kargs)
     self.__result_dict = {}
-    self.__state = RPC.IDLE
-
-    self.package = package
-    self.call = call
-    self.request = request
-    self.response = response
-    self.callback = callback
-
 
-  def MakeCall(self, package=None, call=None, request=None, response=None,
-               callback=None):
-    """Makes an asynchronous (i.e. non-blocking) API call within the
-    specified package for the specified call method. request and response must
-    be the appropriately typed ProtocolBuffers for the API call.
-    callback, if provided, will be called when the request completes
-    successfully or when an error occurs.  If an error has ocurred, the
-    exception() method on this class will return the error, which can be
-    accessed from the callback.
-
-    Args:
-      Same as constructor; see __init__.
-
-    Raises:
-      TypeError or AssertionError if an argument is of an invalid type.
-      AssertionError or RuntimeError is an RPC is already in use.
-    """
-    self.callback = callback or self.callback
-    self.package = package or self.package
-    self.call = call or self.call
-    self.request = request or self.request
-    self.response = response or self.response
-
-    assert self.__state is RPC.IDLE, ("RPC for %s.%s has already been started" %
-                                      (self.package, self.call))
-    assert self.callback is None or callable(self.callback)
-    assert isinstance(self.request, ProtocolBuffer.ProtocolMessage)
-    assert isinstance(self.response, ProtocolBuffer.ProtocolMessage)
-
-    e = ProtocolBuffer.Encoder()
-    self.request.Output(e)
-
-    self.__state = RPC.RUNNING
-    _apphosting_runtime___python__apiproxy.MakeCall(
-        self.package, self.call, e.buffer(), self.__result_dict,
-        self.__MakeCallDone, self)
-
-  def Wait(self):
+  def _WaitImpl(self):
     """Waits on the API call associated with this RPC. The callback,
     if provided, will be executed before Wait() returns. If this RPC
     is already complete, or if the RPC was never started, this
@@ -159,42 +102,32 @@
     """
     try:
       rpc_completed = _apphosting_runtime___python__apiproxy.Wait(self)
-    except runtime.DeadlineExceededError:
-      raise
-    except apiproxy_errors.InterruptedError:
+    except (runtime.DeadlineExceededError, apiproxy_errors.InterruptedError):
       raise
     except:
       exc_class, exc, tb = sys.exc_info()
       if (isinstance(exc, SystemError) and
-          exc.args == ('uncaught RPC exception',)):
+          exc.args and exc.args[0] == 'uncaught RPC exception'):
         raise
       rpc = None
       if hasattr(exc, "_appengine_apiproxy_rpc"):
         rpc = exc._appengine_apiproxy_rpc
       new_exc = apiproxy_errors.InterruptedError(exc, rpc)
       raise new_exc.__class__, new_exc, tb
+    return True
 
-    assert rpc_completed, ("RPC for %s.%s was not completed, and no other " +
-                           "exception was raised " % (self.package, self.call))
-
-  def CheckSuccess(self):
-    """If there was an exception, raise it now.
+  def _MakeCallImpl(self):
+    assert isinstance(self.request, ProtocolBuffer.ProtocolMessage)
+    assert isinstance(self.response, ProtocolBuffer.ProtocolMessage)
 
-    Raises:
-      Exception of the API call or the callback, if any.
-    """
-    if self.exception and self.__traceback:
-      raise self.exception.__class__, self.exception, self.__traceback
-    if self.exception:
-      raise self.exception
+    e = ProtocolBuffer.Encoder()
+    self.request.Output(e)
 
-  @property
-  def exception(self):
-    return self.__exception
+    self.__state = RPC.RUNNING
 
-  @property
-  def state(self):
-    return self.__state
+    _apphosting_runtime___python__apiproxy.MakeCall(
+        self.package, self.call, e.buffer(), self.__result_dict,
+        self.__MakeCallDone, self)
 
   def __MakeCallDone(self):
     self.__state = RPC.FINISHING
@@ -210,7 +143,7 @@
         self.__exception = apiproxy_errors.CapabilityDisabledError(
             "The API call %s.%s() is temporarily unavailable." % (
             self.package, self.call))
-    elif _ExceptionsMap.has_key(self.__result_dict['error']):
+    elif self.__result_dict['error'] in _ExceptionsMap:
       exception_entry = _ExceptionsMap[self.__result_dict['error']]
       self.__exception = exception_entry[0](
           exception_entry[1] % (self.package, self.call))
@@ -219,13 +152,17 @@
         self.response.ParseFromString(self.__result_dict['result_string'])
       except Exception, e:
         self.__exception = e
-    if self.callback:
-      try:
-        self.callback()
-      except:
-        exc_class, self.__exception, self.__traceback = sys.exc_info()
-        self.__exception._appengine_apiproxy_rpc = self
-        raise
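+    # Note: __Callback is defined on the apiproxy_rpc.RPC base class; the
+    # name-mangled lookup still resolves because both classes are named RPC.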
+    self.__Callback()
+
+def CreateRPC():
+  """Create a RPC instance. suitable for talking to remote services.
+
+  Each RPC instance can be used only once, and should not be reused.
+
+  Returns:
+    An instance of the RPC class.
+  """
+  return RPC()
 
 
 def MakeSyncCall(package, call, request, response):
@@ -240,7 +177,7 @@
   Raises:
     See CheckSuccess() above.
   """
-  rpc = RPC()
+  rpc = CreateRPC()
   rpc.MakeCall(package, call, request, response)
   rpc.Wait()
   rpc.CheckSuccess()
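+
+# Illustrative asynchronous use of the same plumbing (a sketch, not SDK
+# documentation); get_request/get_response stand for the call's PBs:
+#
+#   rpc = CreateRPC()
+#   rpc.MakeCall('datastore_v3', 'Get', get_request, get_response)
+#   ...                     # overlap other work with the in-flight RPC
+#   rpc.Wait()              # runs the callback, if any, before returning
+#   rpc.CheckSuccess()      # re-raises the call's exception, if one occurred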
--- a/thirdparty/google_appengine/google/appengine/tools/appcfg.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/appcfg.py	Tue Jan 20 13:19:45 2009 +0000
@@ -29,7 +29,7 @@
 """
 
 
-import cookielib
+import calendar
 import datetime
 import getpass
 import logging
@@ -38,20 +38,21 @@
 import os
 import re
 import sha
-import socket
 import sys
 import tempfile
 import time
-import urllib
 import urllib2
 
 import google
+import yaml
+from google.appengine.cron import groctimespecification
 from google.appengine.api import appinfo
+from google.appengine.api import croninfo
 from google.appengine.api import validation
 from google.appengine.api import yaml_errors
 from google.appengine.api import yaml_object
 from google.appengine.datastore import datastore_index
-import yaml
+from google.appengine.tools import appengine_rpc
 
 
 MAX_FILES_TO_CLONE = 100
@@ -69,6 +70,9 @@
 verbosity = 1
 
 
+appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = "python"
+
+
 def StatusUpdate(msg):
   """Print a status message to stderr.
 
@@ -81,289 +85,6 @@
     print >>sys.stderr, msg
 
 
-class ClientLoginError(urllib2.HTTPError):
-  """Raised to indicate there was an error authenticating with ClientLogin."""
-
-  def __init__(self, url, code, msg, headers, args):
-    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
-    self.args = args
-    self.reason = args["Error"]
-
-
-class AbstractRpcServer(object):
-  """Provides a common interface for a simple RPC server."""
-
-  def __init__(self, host, auth_function, host_override=None,
-               extra_headers=None, save_cookies=False):
-    """Creates a new HttpRpcServer.
-
-    Args:
-      host: The host to send requests to.
-      auth_function: A function that takes no arguments and returns an
-        (email, password) tuple when called. Will be called if authentication
-        is required.
-      host_override: The host header to send to the server (defaults to host).
-      extra_headers: A dict of extra headers to append to every request. Values
-        supplied here will override other default headers that are supplied.
-      save_cookies: If True, save the authentication cookies to local disk.
-        If False, use an in-memory cookiejar instead.  Subclasses must
-        implement this functionality.  Defaults to False.
-    """
-    self.host = host
-    self.host_override = host_override
-    self.auth_function = auth_function
-    self.authenticated = False
-
-    self.extra_headers = {
-      "User-agent": GetUserAgent()
-    }
-    if extra_headers:
-      self.extra_headers.update(extra_headers)
-
-    self.save_cookies = save_cookies
-    self.cookie_jar = cookielib.MozillaCookieJar()
-    self.opener = self._GetOpener()
-    if self.host_override:
-      logging.info("Server: %s; Host: %s", self.host, self.host_override)
-    else:
-      logging.info("Server: %s", self.host)
-
-  def _GetOpener(self):
-    """Returns an OpenerDirector for making HTTP requests.
-
-    Returns:
-      A urllib2.OpenerDirector object.
-    """
-    raise NotImplemented()
-
-  def _CreateRequest(self, url, data=None):
-    """Creates a new urllib request."""
-    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
-    req = urllib2.Request(url, data=data)
-    if self.host_override:
-      req.add_header("Host", self.host_override)
-    for key, value in self.extra_headers.iteritems():
-      req.add_header(key, value)
-    return req
-
-  def _GetAuthToken(self, email, password):
-    """Uses ClientLogin to authenticate the user, returning an auth token.
-
-    Args:
-      email:    The user's email address
-      password: The user's password
-
-    Raises:
-      ClientLoginError: If there was an error authenticating with ClientLogin.
-      HTTPError: If there was some other form of HTTP error.
-
-    Returns:
-      The authentication token returned by ClientLogin.
-    """
-    req = self._CreateRequest(
-        url="https://www.google.com/accounts/ClientLogin",
-        data=urllib.urlencode({
-            "Email": email,
-            "Passwd": password,
-            "service": "ah",
-            "source": "Google-appcfg-1.0",
-            "accountType": "HOSTED_OR_GOOGLE"
-        })
-    )
-    try:
-      response = self.opener.open(req)
-      response_body = response.read()
-      response_dict = dict(x.split("=")
-                           for x in response_body.split("\n") if x)
-      return response_dict["Auth"]
-    except urllib2.HTTPError, e:
-      if e.code == 403:
-        body = e.read()
-        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
-        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
-                               e.headers, response_dict)
-      else:
-        raise
-
-  def _GetAuthCookie(self, auth_token):
-    """Fetches authentication cookies for an authentication token.
-
-    Args:
-      auth_token: The authentication token returned by ClientLogin.
-
-    Raises:
-      HTTPError: If there was an error fetching the authentication cookies.
-    """
-    continue_location = "http://localhost/"
-    args = {"continue": continue_location, "auth": auth_token}
-    login_path = os.environ.get("APPCFG_LOGIN_PATH", "/_ah")
-    req = self._CreateRequest("http://%s%s/login?%s" %
-                              (self.host, login_path, urllib.urlencode(args)))
-    try:
-      response = self.opener.open(req)
-    except urllib2.HTTPError, e:
-      response = e
-    if (response.code != 302 or
-        response.info()["location"] != continue_location):
-      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
-                              response.headers, response.fp)
-    self.authenticated = True
-
-  def _Authenticate(self):
-    """Authenticates the user.
-
-    The authentication process works as follows:
-     1) We get a username and password from the user
-     2) We use ClientLogin to obtain an AUTH token for the user
-        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
-     3) We pass the auth token to /_ah/login on the server to obtain an
-        authentication cookie. If login was successful, it tries to redirect
-        us to the URL we provided.
-
-    If we attempt to access the upload API without first obtaining an
-    authentication cookie, it returns a 401 response and directs us to
-    authenticate ourselves with ClientLogin.
-    """
-    for i in range(3):
-      credentials = self.auth_function()
-      try:
-        auth_token = self._GetAuthToken(credentials[0], credentials[1])
-      except ClientLoginError, e:
-        if e.reason == "BadAuthentication":
-          print >>sys.stderr, "Invalid username or password."
-          continue
-        if e.reason == "CaptchaRequired":
-          print >>sys.stderr, (
-              "Please go to\n"
-              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
-              "and verify you are a human.  Then try again.")
-          break;
-        if e.reason == "NotVerified":
-          print >>sys.stderr, "Account not verified."
-          break
-        if e.reason == "TermsNotAgreed":
-          print >>sys.stderr, "User has not agreed to TOS."
-          break
-        if e.reason == "AccountDeleted":
-          print >>sys.stderr, "The user account has been deleted."
-          break
-        if e.reason == "AccountDisabled":
-          print >>sys.stderr, "The user account has been disabled."
-          break
-        if e.reason == "ServiceDisabled":
-          print >>sys.stderr, ("The user's access to the service has been "
-                               "disabled.")
-          break
-        if e.reason == "ServiceUnavailable":
-          print >>sys.stderr, "The service is not available; try again later."
-          break
-        raise
-      self._GetAuthCookie(auth_token)
-      return
-
-  def Send(self, request_path, payload="",
-           content_type="application/octet-stream",
-           timeout=None,
-           **kwargs):
-    """Sends an RPC and returns the response.
-
-    Args:
-      request_path: The path to send the request to, eg /api/appversion/create.
-      payload: The body of the request, or None to send an empty request.
-      content_type: The Content-Type header to use.
-      timeout: timeout in seconds; default None i.e. no timeout.
-        (Note: for large requests on OS X, the timeout doesn't work right.)
-      kwargs: Any keyword arguments are converted into query string parameters.
-
-    Returns:
-      The response body, as a string.
-    """
-    if not self.authenticated:
-      self._Authenticate()
-
-    old_timeout = socket.getdefaulttimeout()
-    socket.setdefaulttimeout(timeout)
-    try:
-      tries = 0
-      while True:
-        tries += 1
-        args = dict(kwargs)
-        url = "http://%s%s?%s" % (self.host, request_path,
-                                  urllib.urlencode(args))
-        req = self._CreateRequest(url=url, data=payload)
-        req.add_header("Content-Type", content_type)
-        req.add_header("X-appcfg-api-version", "1")
-        try:
-          f = self.opener.open(req)
-          response = f.read()
-          f.close()
-          return response
-        except urllib2.HTTPError, e:
-          if tries > 3:
-            raise
-          elif e.code == 401:
-            self._Authenticate()
-          elif e.code >= 500 and e.code < 600:
-            continue
-          else:
-            raise
-    finally:
-      socket.setdefaulttimeout(old_timeout)
-
-
-class HttpRpcServer(AbstractRpcServer):
-  """Provides a simplified RPC-style interface for HTTP requests."""
-
-  DEFAULT_COOKIE_FILE_PATH = "~/.appcfg_cookies"
-
-  def _Authenticate(self):
-    """Save the cookie jar after authentication."""
-    super(HttpRpcServer, self)._Authenticate()
-    if self.cookie_jar.filename is not None and self.save_cookies:
-      StatusUpdate("Saving authentication cookies to %s" %
-                   self.cookie_jar.filename)
-      self.cookie_jar.save()
-
-  def _GetOpener(self):
-    """Returns an OpenerDirector that supports cookies and ignores redirects.
-
-    Returns:
-      A urllib2.OpenerDirector object.
-    """
-    opener = urllib2.OpenerDirector()
-    opener.add_handler(urllib2.ProxyHandler())
-    opener.add_handler(urllib2.UnknownHandler())
-    opener.add_handler(urllib2.HTTPHandler())
-    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
-    opener.add_handler(urllib2.HTTPSHandler())
-    opener.add_handler(urllib2.HTTPErrorProcessor())
-
-    if self.save_cookies:
-      self.cookie_jar.filename = os.path.expanduser(HttpRpcServer.DEFAULT_COOKIE_FILE_PATH)
-
-      if os.path.exists(self.cookie_jar.filename):
-        try:
-          self.cookie_jar.load()
-          self.authenticated = True
-          StatusUpdate("Loaded authentication cookies from %s" %
-                       self.cookie_jar.filename)
-        except (OSError, IOError, cookielib.LoadError), e:
-          logging.debug("Could not load authentication cookies; %s: %s",
-                        e.__class__.__name__, e)
-          self.cookie_jar.filename = None
-      else:
-        try:
-          fd = os.open(self.cookie_jar.filename, os.O_CREAT, 0600)
-          os.close(fd)
-        except (OSError, IOError), e:
-          logging.debug("Could not create authentication cookies file; %s: %s",
-                        e.__class__.__name__, e)
-          self.cookie_jar.filename = None
-
-    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
-    return opener
-
-
 def GetMimeTypeIfStaticFile(config, filename):
   """Looks up the mime type for 'filename'.
 
@@ -427,8 +148,8 @@
   """
 
   ATTRIBUTES = {
-    "timestamp": validation.TYPE_FLOAT,
-    "opt_in": validation.Optional(validation.TYPE_BOOL),
+      "timestamp": validation.TYPE_FLOAT,
+      "opt_in": validation.Optional(validation.TYPE_BOOL),
   }
 
   @staticmethod
@@ -448,7 +169,8 @@
   """Gets the version of the SDK by parsing the VERSION file.
 
   Args:
-    isfile, open_fn: Used for testing.
+    isfile: Used for testing.
+    open_fn: Used for testing.
 
   Returns:
     A Yaml object or None if the VERSION file does not exist.
@@ -497,11 +219,9 @@
       server: The AbstractRpcServer to use.
       config: The yaml object that specifies the configuration of this
         application.
-
-    Args for testing:
-      isdir: Replacement for os.path.isdir.
-      isfile: Replacement for os.path.isfile.
-      open: Replacement for the open builtin.
+      isdir: Replacement for os.path.isdir (for testing).
+      isfile: Replacement for os.path.isfile (for testing).
+      open_fn: Replacement for the open builtin (for testing).
     """
     self.server = server
     self.config = config
@@ -514,7 +234,7 @@
     """Returns the filename for the nag file for this user."""
     user_homedir = os.path.expanduser("~/")
     if not os.path.isdir(user_homedir):
-      drive, tail = os.path.splitdrive(os.__file__)
+      drive, unused_tail = os.path.splitdrive(os.__file__)
       if drive:
         os.environ["HOMEDRIVE"] = drive
 
@@ -690,6 +410,9 @@
     save the response in the nag file.  Subsequent calls to this function
     will re-use that response.
 
+    Args:
+      input_fn: used to collect user input. This is for testing only.
+
     Returns:
       True if the user wants to check for updates.  False otherwise.
     """
@@ -731,6 +454,7 @@
     self.definitions = definitions
 
   def DoUpload(self):
+    """Uploads the index definitions."""
     StatusUpdate("Uploading index definitions.")
     self.server.Send("/api/datastore/index/add",
                      app_id=self.config.application,
@@ -738,6 +462,31 @@
                      payload=self.definitions.ToYAML())
 
 
+class CronEntryUpload(object):
+  """Provides facilities to upload cron entries to the hosting service."""
+
+  def __init__(self, server, config, cron):
+    """Creates a new CronEntryUpload.
+
+    Args:
+      server: The RPC server to use.  Should be an instance of a subclass of
+        AbstractRpcServer.
+      config: The AppInfoExternal object derived from the app.yaml file.
+      cron: The CronInfoExternal object loaded from the cron.yaml file.
+    """
+    self.server = server
+    self.config = config
+    self.cron = cron
+
+  def DoUpload(self):
+    """Uploads the cron entries."""
+    StatusUpdate("Uploading cron entries.")
+    self.server.Send("/api/datastore/cron/update",
+                     app_id=self.config.application,
+                     version=self.config.version,
+                     payload=self.cron.ToYAML())
+
+
 class IndexOperation(object):
   """Provide facilities for writing Index operation commands."""
 
@@ -840,11 +589,11 @@
           "Are you sure you want to delete this index? (N/y/a): ")
       confirmation = confirmation.strip().lower()
 
-      if confirmation == 'y':
+      if confirmation == "y":
         return True
-      elif confirmation == 'n' or confirmation == '':
+      elif confirmation == "n" or not confirmation:
         return False
-      elif confirmation == 'a':
+      elif confirmation == "a":
         self.force = True
         return True
       else:
@@ -868,28 +617,28 @@
       definitions: datastore_index.IndexDefinitions as loaded from users
         index.yaml file.
     """
-    new_indexes, unused_indexes = self.DoDiff(definitions)
+    unused_new_indexes, notused_indexes = self.DoDiff(definitions)
 
     deletions = datastore_index.IndexDefinitions(indexes=[])
-    if unused_indexes.indexes is not None:
-      for index in unused_indexes.indexes:
+    if notused_indexes.indexes is not None:
+      for index in notused_indexes.indexes:
         if self.force or self.GetConfirmation(index):
           deletions.indexes.append(index)
 
-    if len(deletions.indexes) > 0:
+    if deletions.indexes:
       not_deleted = self.DoDelete(deletions)
 
       if not_deleted.indexes:
         not_deleted_count = len(not_deleted.indexes)
         if not_deleted_count == 1:
-          warning_message = ('An index was not deleted.  Most likely this is '
-                             'because it no longer exists.\n\n')
+          warning_message = ("An index was not deleted.  Most likely this is "
+                             "because it no longer exists.\n\n")
         else:
-          warning_message = ('%d indexes were not deleted.  Most likely this '
-                             'is because they no longer exist.\n\n'
+          warning_message = ("%d indexes were not deleted.  Most likely this "
+                             "is because they no longer exist.\n\n"
                              % not_deleted_count)
         for index in not_deleted.indexes:
-          warning_message = warning_message + index.ToYAML()
+          warning_message += index.ToYAML()
         logging.warning(warning_message)
 
 
@@ -925,6 +674,7 @@
     self.valid_dates = None
     if self.num_days:
       patterns = []
+      now = PacificTime(now)
       for i in xrange(self.num_days):
         then = time.gmtime(now - 24*3600 * i)
         patterns.append(re.escape(time.strftime("%d/%m/%Y", then)))
@@ -984,25 +734,25 @@
       request should be issued; or None, if not.
     """
     logging.info("Request with offset %r.", offset)
-    kwds = {'app_id': self.config.application,
-            'version': self.version_id,
-            'limit': 100,
-            }
+    kwds = {"app_id": self.config.application,
+            "version": self.version_id,
+            "limit": 100,
+           }
     if offset:
-      kwds['offset'] = offset
+      kwds["offset"] = offset
     if self.severity is not None:
-      kwds['severity'] = str(self.severity)
+      kwds["severity"] = str(self.severity)
     response = self.server.Send("/api/request_logs", payload=None, **kwds)
     response = response.replace("\r", "\0")
     lines = response.splitlines()
     logging.info("Received %d bytes, %d records.", len(response), len(lines))
     offset = None
-    if lines and lines[0].startswith('#'):
-      match = re.match(r'^#\s*next_offset=(\S+)\s*$', lines[0])
+    if lines and lines[0].startswith("#"):
+      match = re.match(r"^#\s*next_offset=(\S+)\s*$", lines[0])
       del lines[0]
       if match:
         offset = match.group(1)
-    if lines and lines[-1].startswith('#'):
+    if lines and lines[-1].startswith("#"):
       del lines[-1]
     valid_dates = self.valid_dates
     sentinel = self.sentinel
@@ -1015,13 +765,67 @@
            line[len_sentinel : len_sentinel+1] in ("", "\0")) or
           (valid_dates and not valid_dates.match(line))):
         return None
-      tf.write(line + '\n')
+      tf.write(line + "\n")
     if not lines:
       return None
     return offset
 
 
-def CopyReversedLines(input, output, blocksize=2**16):
+def PacificTime(now):
+  """Helper to return the number of seconds between UTC and Pacific time.
+
+  This is needed to compute today's date in Pacific time (more
+  specifically: Mountain View local time), which is how request logs
+  are reported.  (Google servers always report times in Mountain View
+  local time, regardless of where they are physically located.)
+
+  This takes (post-2006) US DST into account.  Pacific time is either
+  8 hours or 7 hours west of UTC, depending on whether DST is in
+  effect.  Since 2007, US DST starts on the Second Sunday in March
+  March, and ends on the first Sunday in November.  (Reference:
+  http://aa.usno.navy.mil/faq/docs/daylight_time.php.)
+
+  Note that the server doesn't report its local time (the HTTP Date
+  header uses UTC), and the client's local time is irrelevant.
+
+  Args:
+    now: A posix timestamp giving current UTC time.
+
+  Returns:
+    A pseudo-posix timestamp giving current Pacific time.  Passing
+    this through time.gmtime() will produce a tuple in Pacific local
+    time.
+  """
+  now -= 8*3600
+  if IsPacificDST(now):
+    now += 3600
+  return now
+
+
+def IsPacificDST(now):
+  """Helper for PacificTime to decide whether now is Pacific DST (PDT).
+
+  Args:
+    now: A pseudo-posix timestamp giving current time in PST.
+
+  Returns:
+    True if now falls within the range of DST, False otherwise.
+  """
+  DAY = 24*3600
+  SUNDAY = 6
+  pst = time.gmtime(now)
+  year = pst[0]
+  assert year >= 2007
+  begin = calendar.timegm((year, 3, 8, 2, 0, 0, 0, 0, 0))
+  while time.gmtime(begin).tm_wday != SUNDAY:
+    begin += DAY
+  end = calendar.timegm((year, 11, 1, 2, 0, 0, 0, 0, 0))
+  while time.gmtime(end).tm_wday != SUNDAY:
+    end += DAY
+  return begin <= now < end
+
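+# For example (illustrative): computing today's date in Pacific time, the
+# form in which the request logs report dates:
+#
+#   pacific_now = time.gmtime(PacificTime(time.time()))
+#   date_str = time.strftime("%d/%m/%Y", pacific_now)
+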
+
+def CopyReversedLines(instream, outstream, blocksize=2**16):
   r"""Copy lines from input stream to output stream in reverse order.
 
   As a special feature, null bytes in the input are turned into
@@ -1030,20 +834,20 @@
   "A\0B\nC\0D\n", the output is "C\n\tD\nA\n\tB\n".
 
   Args:
-    input: A seekable stream open for reading in binary mode.
-    output: A stream open for writing; doesn't have to be seekable or binary.
+    instream: A seekable stream open for reading in binary mode.
+    outstream: A stream open for writing; doesn't have to be seekable or binary.
     blocksize: Optional block size for buffering, for unit testing.
 
   Returns:
     The number of lines copied.
   """
   line_count = 0
-  input.seek(0, 2)
-  last_block = input.tell() // blocksize
+  instream.seek(0, 2)
+  last_block = instream.tell() // blocksize
   spillover = ""
   for iblock in xrange(last_block + 1, -1, -1):
-    input.seek(iblock * blocksize)
-    data = input.read(blocksize)
+    instream.seek(iblock * blocksize)
+    data = instream.read(blocksize)
     lines = data.splitlines(True)
     lines[-1:] = "".join(lines[-1:] + [spillover]).splitlines(True)
     if lines and not lines[-1].endswith("\n"):
@@ -1054,7 +858,7 @@
     if lines:
       line_count += len(lines)
       data = "".join(lines).replace("\0", "\n\t")
-      output.write(data)
+      outstream.write(data)
   return line_count
 
 
@@ -1130,14 +934,14 @@
   def _Hash(self, content):
     """Compute the hash of the content.
 
-    Arg:
+    Args:
       content: The data to hash as a string.
 
     Returns:
       The string representation of the hash.
     """
     h = sha.new(content).hexdigest()
-    return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
+    return "%s_%s_%s_%s_%s" % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
 
   def AddFile(self, path, file_handle):
     """Adds the provided file to the list to be pushed to the server.
@@ -1150,7 +954,7 @@
     assert file_handle is not None
 
     reason = appinfo.ValidFilename(path)
-    if reason != '':
+    if reason:
       logging.error(reason)
       return
 
@@ -1188,7 +992,14 @@
     files_to_upload = {}
 
     def CloneFiles(url, files, file_type):
-      if len(files) == 0:
+      """Sends files to the given url.
+
+      Args:
+        url: the server URL to use.
+        files: a list of files to send.
+        file_type: the type of the files, used in the status message.
+      """
+      if not files:
         return
 
       StatusUpdate("Cloning %d %s file%s." %
@@ -1208,7 +1019,7 @@
     CloneFiles("/api/appversion/cloneblobs", blobs_to_clone, "static")
     CloneFiles("/api/appversion/clonefiles", files_to_clone, "application")
 
-    logging.info('Files to upload: ' + str(files_to_upload))
+    logging.info("Files to upload: " + str(files_to_upload))
 
     self.files = files_to_upload
     return sorted(files_to_upload.iterkeys())
@@ -1280,6 +1091,7 @@
     """
     logging.info("Reading app configuration.")
 
+    path = ""
     try:
       StatusUpdate("Scanning files on local disk.")
       num_files = 0
@@ -1313,7 +1125,7 @@
 
     try:
       missing_files = self.Begin()
-      if len(missing_files) > 0:
+      if missing_files:
         StatusUpdate("Uploading %d files." % len(missing_files))
         num_files = 0
         for missing_file in missing_files:
@@ -1383,34 +1195,16 @@
   return length
 
 
-def GetPlatformToken(os_module=os, sys_module=sys, platform=sys.platform):
-  """Returns a 'User-agent' token for the host system platform.
-
-  Args:
-    os_module, sys_module, platform: Used for testing.
-
-  Returns:
-    String containing the platform token for the host system.
-  """
-  if hasattr(sys_module, "getwindowsversion"):
-    windows_version = sys_module.getwindowsversion()
-    version_info = ".".join(str(i) for i in windows_version[:4])
-    return platform + "/" + version_info
-  elif hasattr(os_module, "uname"):
-    uname = os_module.uname()
-    return "%s/%s" % (uname[0], uname[2])
-  else:
-    return "unknown"
-
-
-def GetUserAgent(get_version=GetVersionObject, get_platform=GetPlatformToken):
+def GetUserAgent(get_version=GetVersionObject,
+                 get_platform=appengine_rpc.GetPlatformToken):
   """Determines the value of the 'User-agent' header to use for HTTP requests.
 
   If the 'APPCFG_SDK_NAME' environment variable is present, that will be
   used as the first product token in the user-agent.
 
   Args:
-    get_version, get_platform: Used for testing.
+    get_version: Used for testing.
+    get_platform: Used for testing.
 
   Returns:
     String containing the 'user-agent' header value, which includes the SDK
@@ -1439,6 +1233,16 @@
   return " ".join(product_tokens)
 
 
+def GetSourceName(get_version=GetVersionObject):
+  """Gets the name of this source version."""
+  version = get_version()
+  if version is None:
+    release = "unknown"
+  else:
+    release = version["release"]
+  return "Google-appcfg-%s" % (release,)
+
+
 class AppCfgApp(object):
   """Singleton class to wrap AppCfg tool functionality.
 
@@ -1464,7 +1268,7 @@
   """
 
   def __init__(self, argv, parser_class=optparse.OptionParser,
-               rpc_server_class=HttpRpcServer,
+               rpc_server_class=appengine_rpc.HttpRpcServer,
                raw_input_fn=raw_input,
                password_input_fn=getpass.getpass,
                error_fh=sys.stderr):
@@ -1521,14 +1325,17 @@
     Catches any HTTPErrors raised by the action and prints them to stderr.
     """
     try:
-      self.action.function(self)
+      self.action(self)
     except urllib2.HTTPError, e:
       body = e.read()
       print >>self.error_fh, ("Error %d: --- begin server output ---\n"
                               "%s\n--- end server output ---" %
                               (e.code, body.rstrip("\n")))
+      return 1
     except yaml_errors.EventListenerError, e:
       print >>self.error_fh, ("Error parsing yaml file:\n%s" % e)
+      return 1
+    return 0
 
   def _GetActionDescriptions(self):
     """Returns a formatted string containing the short_descs for all actions."""
@@ -1548,7 +1355,9 @@
 
     class Formatter(optparse.IndentedHelpFormatter):
       """Custom help formatter that does not reformat the description."""
+
       def format_description(self, description):
+        """Very simple formatter."""
         return description + "\n"
 
     desc = self._GetActionDescriptions()
@@ -1600,7 +1409,7 @@
     parser.set_usage(action.usage)
     parser.set_description("%s\n%s" % (action.short_desc, action.long_desc))
     action.options(self, parser)
-    options, args = parser.parse_args(self.argv[1:])
+    options, unused_args = parser.parse_args(self.argv[1:])
     return parser, options
 
   def _PrintHelpAndExit(self, exit_code=2):
@@ -1641,15 +1450,24 @@
       server = self.rpc_server_class(
           self.options.server,
           lambda: (email, "password"),
+          GetUserAgent(),
+          GetSourceName(),
           host_override=self.options.host,
-          extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
           save_cookies=self.options.save_cookies)
       server.authenticated = True
       return server
 
+    if self.options.passin:
+      auth_tries = 1
+    else:
+      auth_tries = 3
+
     return self.rpc_server_class(self.options.server, GetUserCredentials,
+                                 GetUserAgent(), GetSourceName(),
                                  host_override=self.options.host,
-                                 save_cookies=self.options.save_cookies)
+                                 save_cookies=self.options.save_cookies,
+                                 auth_tries=auth_tries,
+                                 account_type="HOSTED_OR_GOOGLE")
 
   def _FindYaml(self, basepath, file_name):
     """Find yaml files in application directory.
@@ -1664,7 +1482,7 @@
     if not os.path.isdir(basepath):
       self.parser.error("Not a directory: %s" % basepath)
 
-    for yaml_file in (file_name + '.yaml', file_name + '.yml'):
+    for yaml_file in (file_name + ".yaml", file_name + ".yml"):
       yaml_path = os.path.join(basepath, yaml_file)
       if os.path.isfile(yaml_path):
         return yaml_path
@@ -1674,6 +1492,9 @@
   def _ParseAppYaml(self, basepath):
     """Parses the app.yaml file.
 
+    Args:
+      basepath: The directory of the application.
+
     Returns:
       An AppInfoExternal object.
     """
@@ -1692,6 +1513,9 @@
   def _ParseIndexYaml(self, basepath):
     """Parses the index.yaml file.
 
+    Args:
+      basepath: The directory of the application.
+
     Returns:
       A single parsed yaml file or None if the file does not exist.
     """
@@ -1705,6 +1529,25 @@
       return index_defs
     return None
 
+  def _ParseCronYaml(self, basepath):
+    """Parses the cron.yaml file.
+
+    Args:
+      basepath: The directory of the application.
+
+    Returns:
+      A CronInfoExternal object.
+    """
+    file_name = self._FindYaml(basepath, "cron")
+    if file_name is not None:
+      fh = open(file_name, "r")
+      try:
+        cron_info = croninfo.LoadSingleCron(fh)
+      finally:
+        fh.close()
+      return cron_info
+    return None
+
   def Help(self):
     """Prints help for a specific action.
 
@@ -1716,7 +1559,7 @@
                         self._GetActionDescriptions())
 
     action = self.actions[self.args[0]]
-    self.parser, options = self._MakeSpecificParser(action)
+    self.parser, unused_options = self._MakeSpecificParser(action)
     self._PrintHelpAndExit(exit_code=0)
 
   def Update(self):
@@ -1745,8 +1588,13 @@
                      "%s\n--- end server output ---" %
                      (e.code, e.read().rstrip("\n")))
         print >> self.error_fh, (
-          "Your app was updated, but there was an error updating your indexes. "
-          "Please retry later with appcfg.py update_indexes.")
+            "Your app was updated, but there was an error updating your "
+            "indexes. Please retry later with appcfg.py update_indexes.")
+
+    cron_entries = self._ParseCronYaml(basepath)
+    if cron_entries:
+      cron_upload = CronEntryUpload(rpc_server, appyaml, cron_entries)
+      cron_upload.DoUpload()
 
   def _UpdateOptions(self, parser):
     """Adds update-specific options to 'parser'.
@@ -1786,6 +1634,20 @@
                       default=False,
                       help="Force deletion without being prompted.")
 
+  def UpdateCron(self):
+    """Updates any new or changed cron definitions."""
+    if len(self.args) != 1:
+      self.parser.error("Expected a single <directory> argument.")
+
+    basepath = self.args[0]
+    appyaml = self._ParseAppYaml(basepath)
+    rpc_server = self._GetRpcServer()
+
+    cron_entries = self._ParseCronYaml(basepath)
+    if cron_entries:
+      cron_upload = CronEntryUpload(rpc_server, appyaml, cron_entries)
+      cron_upload.DoUpload()
+
   def UpdateIndexes(self):
     """Updates indexes."""
     if len(self.args) != 1:
@@ -1835,7 +1697,7 @@
     logs_requester.DownloadLogs()
 
   def _RequestLogsOptions(self, parser):
-    """Ads request_logs-specific options to 'parser'.
+    """Adds request_logs-specific options to 'parser'.
 
     Args:
       parser: An instance of OptionsParser.
@@ -1843,25 +1705,64 @@
     parser.add_option("-n", "--num_days", type="int", dest="num_days",
                       action="store", default=None,
                       help="Number of days worth of log data to get. "
-                           "The cut-off point is midnight UTC. "
-                           "Use 0 to get all available logs. "
-                           "Default is 1, unless --append is also given; "
-                           "then the default is 0.")
+                      "The cut-off point is midnight UTC. "
+                      "Use 0 to get all available logs. "
+                      "Default is 1, unless --append is also given; "
+                      "then the default is 0.")
     parser.add_option("-a", "--append", dest="append",
-                       action="store_true", default=False,
+                      action="store_true", default=False,
                       help="Append to existing file.")
     parser.add_option("--severity", type="int", dest="severity",
                       action="store", default=None,
                       help="Severity of app-level log messages to get. "
-                           "The range is 0 (DEBUG) through 4 (CRITICAL). "
-                           "If omitted, only request logs are returned.")
+                      "The range is 0 (DEBUG) through 4 (CRITICAL). "
+                      "If omitted, only request logs are returned.")
+
+  def CronInfo(self, now=None, output=sys.stdout):
+    """Displays information about cron definitions.
+
+    Args:
+      now: Used for testing.
+      output: Used for testing.
+    """
+    if len(self.args) != 1:
+      self.parser.error("Expected a single <directory> argument.")
+    if now is None:
+      now = datetime.datetime.now()
+
+    basepath = self.args[0]
+    cron_entries = self._ParseCronYaml(basepath)
+    if cron_entries:
+      for entry in cron_entries.cron:
+        description = entry.description
+        if not description:
+          description = "<no description>"
+        print >>output, "\n%s:\nURL: %s\nSchedule: %s" % (description,
+                                                          entry.url,
+                                                          entry.schedule)
+        schedule = groctimespecification.GrocTimeSpecification(entry.schedule)
+        matches = schedule.GetMatches(now, self.options.num_runs)
+        for match in matches:
+          print >>output, "%s, %s from now" % (
+              match.strftime("%Y-%m-%d %H:%M:%S"), match - now)
+
+  def _CronInfoOptions(self, parser):
+    """Adds cron_info-specific options to 'parser'.
+
+    Args:
+      parser: An instance of OptionsParser.
+    """
+    parser.add_option("-n", "--num_runs", type="int", dest="num_runs",
+                      action="store", default=5,
+                      help="Number of runs of each cron job to display"
+                      "Default is 5")
 
   class Action(object):
     """Contains information about a command line action.
 
     Attributes:
-      function: An AppCfgApp function that will perform the appropriate
-        action.
+      function: The name of a function defined on AppCfg or its subclasses
+        that will perform the appropriate action.
       usage: A command line usage string.
       short_desc: A one-line description of the action.
       long_desc: A detailed description of the action.  Whitespace and
@@ -1879,39 +1780,56 @@
       self.long_desc = long_desc
       self.options = options
 
+    def __call__(self, appcfg):
+      """Invoke this Action on the specified AppCfg.
+
+      This calls the function of the appropriate name on AppCfg, and
+      respects polymorphic overrides.
+      """
+      method = getattr(appcfg, self.function)
+      return method()
+
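+  # For example, AppCfgApp.actions["update"](app) dispatches to
+  # app.Update(), so a subclass of AppCfgApp can override individual
+  # actions by name.
+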
   actions = {
 
       "help": Action(
-        function=Help,
-        usage="%prog help <action>",
-        short_desc="Print help for a specific action."),
+          function="Help",
+          usage="%prog help <action>",
+          short_desc="Print help for a specific action."),
 
       "update": Action(
-        function=Update,
-        usage="%prog [options] update <directory>",
-        options=_UpdateOptions,
-        short_desc="Create or update an app version.",
-        long_desc="""
+          function="Update",
+          usage="%prog [options] update <directory>",
+          options=_UpdateOptions,
+          short_desc="Create or update an app version.",
+          long_desc="""
 Specify a directory that contains all of the files required by
 the app, and appcfg.py will create/update the app version referenced
 in the app.yaml file at the top level of that directory.  appcfg.py
 will follow symlinks and recursively upload all files to the server.
 Temporary or source control files (e.g. foo~, .svn/*) will be skipped."""),
 
+      "update_cron": Action(
+          function="UpdateCron",
+          usage="%prog [options] update_cron <directory>",
+          short_desc="Update application cron definitions.",
+          long_desc="""
+The 'update_cron' command will update any new, removed or changed cron
+definitions from the cron.yaml file."""),
+
       "update_indexes": Action(
-        function=UpdateIndexes,
-        usage="%prog [options] update_indexes <directory>",
-        short_desc="Update application indexes.",
-        long_desc="""
+          function="UpdateIndexes",
+          usage="%prog [options] update_indexes <directory>",
+          short_desc="Update application indexes.",
+          long_desc="""
 The 'update_indexes' command will add additional indexes which are not currently
 in production as well as restart any indexes that were not completed."""),
 
       "vacuum_indexes": Action(
-        function=VacuumIndexes,
-        usage="%prog [options] vacuum_indexes <directory>",
-        options=_VacuumIndexesOptions,
-        short_desc="Delete unused indexes from application.",
-        long_desc="""
+          function="VacuumIndexes",
+          usage="%prog [options] vacuum_indexes <directory>",
+          options=_VacuumIndexesOptions,
+          short_desc="Delete unused indexes from application.",
+          long_desc="""
 The 'vacuum_indexes' command will help clean up indexes which are no longer
 in use.  It does this by comparing the local index configuration with
 indexes that are actually defined on the server.  If any indexes on the
@@ -1919,24 +1837,36 @@
 option to delete them."""),
 
       "rollback": Action(
-        function=Rollback,
-        usage="%prog [options] rollback <directory>",
-        short_desc="Rollback an in-progress update.",
-        long_desc="""
+          function="Rollback",
+          usage="%prog [options] rollback <directory>",
+          short_desc="Rollback an in-progress update.",
+          long_desc="""
 The 'update' command requires a server-side transaction.  Use 'rollback'
 if you get an error message about another transaction being in progress
 and you are sure that there is no such transaction."""),
 
       "request_logs": Action(
-        function=RequestLogs,
-        usage="%prog [options] request_logs <directory> <output_file>",
-        options=_RequestLogsOptions,
-        short_desc="Write request logs in Apache common log format.",
-        long_desc="""
+          function="RequestLogs",
+          usage="%prog [options] request_logs <directory> <output_file>",
+          options=_RequestLogsOptions,
+          short_desc="Write request logs in Apache common log format.",
+          long_desc="""
 The 'request_logs' command exports the request logs from your application
 to a file.  It will write Apache common log format records ordered
chronologically.  If the output file is '-', the logs are written to stdout."""),
 
+      "cron_info": Action(
+          function="CronInfo",
+          usage="%prog [options] cron_info <directory>",
+          options=_CronInfoOptions,
+          short_desc="Display information about cron jobs.",
+          long_desc="""
+The 'cron_info' command will display the next 'number of runs' runs
+for each cron job defined in the cron.yaml file."""),
+
   }
 
 
@@ -1944,7 +1874,9 @@
   logging.basicConfig(format=("%(asctime)s %(levelname)s %(filename)s:"
                               "%(lineno)s %(message)s "))
   try:
-    AppCfgApp(argv).Run()
+    result = AppCfgApp(argv).Run()
+    if result:
+      sys.exit(result)
   except KeyboardInterrupt:
     StatusUpdate("Interrupted.")
     sys.exit(1)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/appengine_rpc.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,388 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Tool for performing authenticated RPCs against App Engine."""
+
+
+import cookielib
+import logging
+import os
+import re
+import socket
+import sys
+import urllib
+import urllib2
+
+
+https_handler = urllib2.HTTPSHandler
+uses_cert_verification = False
+certpath = os.path.join(os.path.dirname(__file__), "cacerts.txt")
+cert_file_available = os.path.exists(certpath)
+try:
+  import https_wrapper
+  if cert_file_available:
+    https_handler = lambda: https_wrapper.CertValidatingHTTPSHandler(
+        ca_certs=certpath)
+    uses_cert_verification = True
+except ImportError:
+  pass
+
+
+def GetPlatformToken(os_module=os, sys_module=sys, platform=sys.platform):
+  """Returns a 'User-agent' token for the host system platform.
+
+  Args:
+    os_module, sys_module, platform: Used for testing.
+
+  Returns:
+    String containing the platform token for the host system.
+  """
+  if hasattr(sys_module, "getwindowsversion"):
+    windows_version = sys_module.getwindowsversion()
+    version_info = ".".join(str(i) for i in windows_version[:4])
+    return platform + "/" + version_info
+  elif hasattr(os_module, "uname"):
+    uname = os_module.uname()
+    return "%s/%s" % (uname[0], uname[2])
+  else:
+    return "unknown"
+
+
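+# Example return values (illustrative):
+#
+#   Windows:                    "win32/5.1.2600.2"
+#   Unix-like (via os.uname()): "Darwin/9.2.0", "Linux/2.6.24"
+#   otherwise:                  "unknown"
+
+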
+class ClientLoginError(urllib2.HTTPError):
+  """Raised to indicate there was an error authenticating with ClientLogin."""
+
+  def __init__(self, url, code, msg, headers, args):
+    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
+    self.args = args
+    self.reason = args["Error"]
+
+
+class AbstractRpcServer(object):
+  """Provides a common interface for a simple RPC server."""
+
+  def __init__(self, host, auth_function, user_agent, source,
+               host_override=None, extra_headers=None, save_cookies=False,
+               auth_tries=3, account_type=None):
+    """Creates a new HttpRpcServer.
+
+    Args:
+      host: The host to send requests to.
+      auth_function: A function that takes no arguments and returns an
+        (email, password) tuple when called. Will be called if authentication
+        is required.
+      user_agent: The user-agent string to send to the server. Specify None to
+        omit the user-agent header.
+      source: The source to specify in authentication requests.
+      host_override: The host header to send to the server (defaults to host).
+      extra_headers: A dict of extra headers to append to every request. Values
+        supplied here will override other default headers that are supplied.
+      save_cookies: If True, save the authentication cookies to local disk.
+        If False, use an in-memory cookiejar instead.  Subclasses must
+        implement this functionality.  Defaults to False.
+      auth_tries: The number of times to attempt auth_function before failing.
+      account_type: One of GOOGLE, HOSTED_OR_GOOGLE, or None for automatic.
+    """
+    self.host = host
+    self.host_override = host_override
+    self.auth_function = auth_function
+    self.source = source
+    self.authenticated = False
+    self.auth_tries = auth_tries
+
+    self.account_type = account_type
+
+    self.extra_headers = {}
+    if user_agent:
+      self.extra_headers["User-Agent"] = user_agent
+    if extra_headers:
+      self.extra_headers.update(extra_headers)
+
+    self.save_cookies = save_cookies
+    self.cookie_jar = cookielib.MozillaCookieJar()
+    self.opener = self._GetOpener()
+    if self.host_override:
+      logging.info("Server: %s; Host: %s", self.host, self.host_override)
+    else:
+      logging.info("Server: %s", self.host)
+
+    if ((self.host_override and self.host_override == "localhost") or
+        self.host == "localhost" or self.host.startswith("localhost:")):
+      self._DevAppServerAuthenticate()
+
+  def _GetOpener(self):
+    """Returns an OpenerDirector for making HTTP requests.
+
+    Returns:
+      A urllib2.OpenerDirector object.
+    """
+    raise NotImplementedError()
+
+  def _CreateRequest(self, url, data=None):
+    """Creates a new urllib request."""
+    req = urllib2.Request(url, data=data)
+    if self.host_override:
+      req.add_header("Host", self.host_override)
+    for key, value in self.extra_headers.iteritems():
+      req.add_header(key, value)
+    return req
+
+  def _GetAuthToken(self, email, password):
+    """Uses ClientLogin to authenticate the user, returning an auth token.
+
+    Args:
+      email:    The user's email address
+      password: The user's password
+
+    Raises:
+      ClientLoginError: If there was an error authenticating with ClientLogin.
+      HTTPError: If there was some other form of HTTP error.
+
+    Returns:
+      The authentication token returned by ClientLogin.
+    """
+    account_type = self.account_type
+    if not account_type:
+      if (self.host.split(':')[0].endswith(".google.com")
+          or (self.host_override
+              and self.host_override.split(':')[0].endswith(".google.com"))):
+        account_type = "HOSTED_OR_GOOGLE"
+      else:
+        account_type = "GOOGLE"
+    data = {
+        "Email": email,
+        "Passwd": password,
+        "service": "ah",
+        "source": self.source,
+        "accountType": account_type
+    }
+
+    req = self._CreateRequest(
+        url="https://www.google.com/accounts/ClientLogin",
+        data=urllib.urlencode(data))
+    try:
+      response = self.opener.open(req)
+      response_body = response.read()
+      response_dict = dict(x.split("=", 1)
+                           for x in response_body.split("\n") if x)
+      return response_dict["Auth"]
+    except urllib2.HTTPError, e:
+      if e.code == 403:
+        body = e.read()
+        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
+        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
+                               e.headers, response_dict)
+      else:
+        raise
+
+  def _GetAuthCookie(self, auth_token):
+    """Fetches authentication cookies for an authentication token.
+
+    Args:
+      auth_token: The authentication token returned by ClientLogin.
+
+    Raises:
+      HTTPError: If there was an error fetching the authentication cookies.
+    """
+    continue_location = "http://localhost/"
+    args = {"continue": continue_location, "auth": auth_token}
+    login_path = os.environ.get("APPCFG_LOGIN_PATH", "/_ah")
+    req = self._CreateRequest("http://%s%s/login?%s" %
+                              (self.host, login_path, urllib.urlencode(args)))
+    try:
+      response = self.opener.open(req)
+    except urllib2.HTTPError, e:
+      response = e
+    if (response.code != 302 or
+        response.info()["location"] != continue_location):
+      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
+                              response.headers, response.fp)
+    self.authenticated = True
+
+  def _Authenticate(self):
+    """Authenticates the user.
+
+    The authentication process works as follows:
+     1) We get a username and password from the user
+     2) We use ClientLogin to obtain an AUTH token for the user
+        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
+     3) We pass the auth token to /_ah/login on the server to obtain an
+        authentication cookie. If login was successful, it tries to redirect
+        us to the URL we provided.
+
+    If we attempt to access the upload API without first obtaining an
+    authentication cookie, it returns a 401 response and directs us to
+    authenticate ourselves with ClientLogin.
+    """
+    for unused_i in range(self.auth_tries):
+      credentials = self.auth_function()
+      try:
+        auth_token = self._GetAuthToken(credentials[0], credentials[1])
+      except ClientLoginError, e:
+        if e.reason == "BadAuthentication":
+          print >>sys.stderr, "Invalid username or password."
+          continue
+        if e.reason == "CaptchaRequired":
+          print >>sys.stderr, (
+              "Please go to\n"
+              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
+              "and verify you are a human.  Then try again.")
+          break
+        if e.reason == "NotVerified":
+          print >>sys.stderr, "Account not verified."
+          break
+        if e.reason == "TermsNotAgreed":
+          print >>sys.stderr, "User has not agreed to TOS."
+          break
+        if e.reason == "AccountDeleted":
+          print >>sys.stderr, "The user account has been deleted."
+          break
+        if e.reason == "AccountDisabled":
+          print >>sys.stderr, "The user account has been disabled."
+          break
+        if e.reason == "ServiceDisabled":
+          print >>sys.stderr, ("The user's access to the service has been "
+                               "disabled.")
+          break
+        if e.reason == "ServiceUnavailable":
+          print >>sys.stderr, "The service is not available; try again later."
+          break
+        raise
+      self._GetAuthCookie(auth_token)
+      return
+
+  def _DevAppServerAuthenticate(self):
+    """Authenticates the user on the dev_appserver."""
+    credentials = self.auth_function()
+    self.extra_headers["Cookie"] = ('dev_appserver_login="%s:True"; Path=/;'  %
+                                    (credentials[0],))
+
+  def Send(self, request_path, payload="",
+           content_type="application/octet-stream",
+           timeout=None,
+           **kwargs):
+    """Sends an RPC and returns the response.
+
+    Args:
+      request_path: The path to send the request to, eg /api/appversion/create.
+      payload: The body of the request, or None to send an empty request.
+      content_type: The Content-Type header to use.
+      timeout: timeout in seconds; default None i.e. no timeout.
+        (Note: for large requests on OS X, the timeout doesn't work right.)
+      kwargs: Any keyword arguments are converted into query string parameters.
+
+    Returns:
+      The response body, as a string.
+    """
+    old_timeout = socket.getdefaulttimeout()
+    socket.setdefaulttimeout(timeout)
+    try:
+      tries = 0
+      while True:
+        tries += 1
+        args = dict(kwargs)
+        url = "http://%s%s?%s" % (self.host, request_path,
+                                  urllib.urlencode(args))
+        req = self._CreateRequest(url=url, data=payload)
+        req.add_header("Content-Type", content_type)
+        req.add_header("X-appcfg-api-version", "1")
+        try:
+          f = self.opener.open(req)
+          response = f.read()
+          f.close()
+          return response
+        except urllib2.HTTPError, e:
+          logging.debug("Got http error, this is try #%s" % tries)
+          if tries > self.auth_tries:
+            raise
+          elif e.code == 401:
+            self._Authenticate()
+          elif e.code >= 500 and e.code < 600:
+            continue
+          elif e.code == 302:
+            loc = e.info()["location"]
+            logging.debug("Got 302 redirect. Location: %s" % loc)
+            if loc.startswith("https://www.google.com/accounts/ServiceLogin"):
+              self._Authenticate()
+            elif re.match(r"https://www.google.com/a/[a-z0-9.-]+/ServiceLogin",
+                          loc):
+              self.account_type = "HOSTED"
+              self._Authenticate()
+            elif loc.startswith("http://%s/_ah/login" % (self.host,)):
+              self._DevAppServerAuthenticate()
+          else:
+            raise
+    finally:
+      socket.setdefaulttimeout(old_timeout)
+
+
+class HttpRpcServer(AbstractRpcServer):
+  """Provides a simplified RPC-style interface for HTTP requests."""
+
+  DEFAULT_COOKIE_FILE_PATH = "~/.appcfg_cookies"
+
+  def _Authenticate(self):
+    """Save the cookie jar after authentication."""
+    if cert_file_available and not uses_cert_verification:
+      logging.warn("ssl module not found. Without this the identity of the "
+                   "remote host cannot be verified, and connections are NOT "
+                   "secure. To fix this, please install the ssl module from "
+                   "http://pypi.python.org/pypi/ssl")
+    super(HttpRpcServer, self)._Authenticate()
+    if self.cookie_jar.filename is not None and self.save_cookies:
+      logging.info("Saving authentication cookies to %s" %
+                   self.cookie_jar.filename)
+      self.cookie_jar.save()
+
+  def _GetOpener(self):
+    """Returns an OpenerDirector that supports cookies and ignores redirects.
+
+    Returns:
+      A urllib2.OpenerDirector object.
+    """
+    opener = urllib2.OpenerDirector()
+    opener.add_handler(urllib2.ProxyHandler())
+    opener.add_handler(urllib2.UnknownHandler())
+    opener.add_handler(urllib2.HTTPHandler())
+    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
+    opener.add_handler(https_handler())
+    opener.add_handler(urllib2.HTTPErrorProcessor())
+
+    if self.save_cookies:
+      self.cookie_jar.filename = os.path.expanduser(
+          HttpRpcServer.DEFAULT_COOKIE_FILE_PATH)
+
+      if os.path.exists(self.cookie_jar.filename):
+        try:
+          self.cookie_jar.load()
+          self.authenticated = True
+          logging.info("Loaded authentication cookies from %s" %
+                       self.cookie_jar.filename)
+        except (OSError, IOError, cookielib.LoadError), e:
+          logging.debug("Could not load authentication cookies; %s: %s",
+                        e.__class__.__name__, e)
+          self.cookie_jar.filename = None
+      else:
+        try:
+          fd = os.open(self.cookie_jar.filename, os.O_CREAT, 0600)
+          os.close(fd)
+        except (OSError, IOError), e:
+          logging.debug("Could not create authentication cookies file; %s: %s",
+                        e.__class__.__name__, e)
+          self.cookie_jar.filename = None
+
+    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
+    return opener
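+
+
+# Usage sketch (hypothetical application, credentials and request path):
+#
+#   server = HttpRpcServer("myapp.appspot.com",
+#                          lambda: ("user@example.com", "secret"),
+#                          "appcfg_py/1.1.9 Darwin/9.2.0",
+#                          "Google-appcfg-1.1.9",
+#                          save_cookies=True, auth_tries=3)
+#   body = server.Send("/api/updatecheck", release="1.1.9")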
--- a/thirdparty/google_appengine/google/appengine/tools/dev_appserver.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/dev_appserver.py	Tue Jan 20 13:19:45 2009 +0000
@@ -76,10 +76,11 @@
 from google.appengine.api import appinfo
 from google.appengine.api import datastore_admin
 from google.appengine.api import datastore_file_stub
+from google.appengine.api import mail_stub
 from google.appengine.api import urlfetch_stub
-from google.appengine.api import mail_stub
 from google.appengine.api import user_service_stub
 from google.appengine.api import yaml_errors
+from google.appengine.api.capabilities import capability_stub
 from google.appengine.api.memcache import memcache_stub
 
 from google.appengine.tools import dev_appserver_index
@@ -467,7 +468,8 @@
     outfile.write('</span>\n')
 
 
-_IGNORE_HEADERS = frozenset(['content-type', 'content-length'])
+_IGNORE_REQUEST_HEADERS = frozenset(['content-type', 'content-length',
+                                     'accept-encoding', 'transfer-encoding'])
 
 def SetupEnvironment(cgi_path,
                      relative_url,
@@ -504,7 +506,7 @@
     env['USER_IS_ADMIN'] = '1'
 
   for key in headers:
-    if key in _IGNORE_HEADERS:
+    if key in _IGNORE_REQUEST_HEADERS:
       continue
     adjusted_name = key.replace('-', '_').upper()
     env['HTTP_' + adjusted_name] = ', '.join(headers.getheaders(key))
@@ -2135,6 +2137,12 @@
     return 'File dispatcher'
 
 
+_IGNORE_RESPONSE_HEADERS = frozenset([
+    'content-encoding', 'accept-encoding', 'transfer-encoding',
+    'server', 'date',
+    ])
+
+
 def RewriteResponse(response_file):
   """Interprets server-side headers and adjusts the HTTP response accordingly.
 
@@ -2150,6 +2158,8 @@
   Args:
     response_file: File-like object containing the full HTTP response including
       the response code, all headers, and the request body.
 
   Returns:
     Tuple (status_code, status_message, header, body) where:
@@ -2162,6 +2172,10 @@
   """
   headers = mimetools.Message(response_file)
 
+  for h in _IGNORE_RESPONSE_HEADERS:
+    if h in headers:
+      del headers[h]
+
   response_status = '%d Good to go' % httplib.OK
 
   location_value = headers.getheader('location')
@@ -2185,7 +2199,7 @@
   else:
     body = response_file.read()
 
-  headers['content-length'] = str(len(body))
+  headers['Content-Length'] = str(len(body))
 
   header_list = []
   for header in headers.headers:
@@ -2354,6 +2368,10 @@
       """
       BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
 
+    def version_string(self):
+      """Returns server's version string used for Server HTTP header"""
+      return self.server_version
+
     def do_GET(self):
       """Handle GET requests."""
       self._HandleRequest()
@@ -2757,6 +2775,10 @@
     'memcache',
     memcache_stub.MemcacheServiceStub())
 
+  apiproxy_stub_map.apiproxy.RegisterStub(
+    'capability_service',
+    capability_stub.CapabilityServiceStub())
+
   try:
     from google.appengine.api.images import images_stub
     apiproxy_stub_map.apiproxy.RegisterStub(
--- a/thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py	Tue Jan 20 13:19:45 2009 +0000
@@ -71,6 +71,7 @@
 
 from google.appengine.api import yaml_errors
 from google.appengine.tools import appcfg
+from google.appengine.tools import appengine_rpc
 from google.appengine.tools import dev_appserver
 
 
@@ -271,9 +272,11 @@
   Returns:
     A HttpRpcServer.
   """
-  server = appcfg.HttpRpcServer(
+  server = appengine_rpc.HttpRpcServer(
       option_dict[ARG_ADMIN_CONSOLE_SERVER],
       lambda: ('unused_email', 'unused_password'),
+      appcfg.GetUserAgent(),
+      appcfg.GetSourceName(),
       host_override=option_dict[ARG_ADMIN_CONSOLE_HOST])
   server.authenticated = True
   return server
--- a/thirdparty/google_appengine/google/net/proto/ProtocolBuffer.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/google/net/proto/ProtocolBuffer.py	Tue Jan 20 13:19:45 2009 +0000
@@ -142,6 +142,19 @@
   def ParseASCIIIgnoreUnknown(self, ascii_string):
     raise AbstractMethod
 
+  def Equals(self, other):
+    raise AbstractMethod
+
+  def __eq__(self, other):
+    if other.__class__ is self.__class__:
+      return self.Equals(other)
+    return NotImplemented
+
+  def __ne__(self, other):
+    if other.__class__ is self.__class__:
+      return not self.Equals(other)
+    return NotImplemented
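+
+  # Example with a hypothetical concrete message subclass:
+  #
+  #   a, b = MyMessage(), MyMessage()
+  #   a == b   # delegates to a.Equals(b)
+  #   a == 42  # __eq__ returns NotImplemented for foreign types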
+
 
   def Output(self, e):
     dbg = []
@@ -279,29 +292,52 @@
     return
 
   def putVarInt32(self, v):
-    if v >= (1L << 31) or v < -(1L << 31):
+
+    buf_append = self.buf.append
+    if v & 127 == v:
+      buf_append(v)
+      return
+    if v >= 0x80000000 or v < -0x80000000:
       raise ProtocolBufferEncodeError, "int32 too big"
-    self.putVarInt64(v)
+    if v < 0:
+      v += 0x10000000000000000
+    while True:
+      bits = v & 127
+      v >>= 7
+      if v:
+        bits |= 128
+      buf_append(bits)
+      if not v:
+        break
     return
 
   def putVarInt64(self, v):
-    if v >= (1L << 63) or v < -(1L << 63):
+    buf_append = self.buf.append
+    if v >= 0x8000000000000000 or v < -0x8000000000000000:
       raise ProtocolBufferEncodeError, "int64 too big"
     if v < 0:
-      v += (1L << 64)
-    self.putVarUint64(v)
+      v += 0x10000000000000000
+    while True:
+      bits = v & 127
+      v >>= 7
+      if v:
+        bits |= 128
+      buf_append(bits)
+      if not v:
+        break
     return
 
   def putVarUint64(self, v):
-    if v < 0 or v >= (1L << 64):
+    buf_append = self.buf.append
+    if v < 0 or v >= 0x10000000000000000:
       raise ProtocolBufferEncodeError, "uint64 too big"
-    while 1:
+    while True:
       bits = v & 127
       v >>= 7
-      if (v != 0):
+      if v:
         bits |= 128
-      self.buf.append(bits)
-      if v == 0:
+      buf_append(bits)
+      if not v:
         break
     return
 
@@ -327,15 +363,11 @@
 
   def putPrefixedString(self, v):
     self.putVarInt32(len(v))
-    a = array.array('B')
-    a.fromstring(v)
-    self.buf.extend(a)
+    self.buf.fromstring(v)
     return
 
   def putRawString(self, v):
-    a = array.array('B')
-    a.fromstring(v)
-    self.buf.extend(a)
+    self.buf.fromstring(v)
 
 
 class Decoder:
@@ -421,10 +453,28 @@
             | (e << 16) | (d << 8) | c)
 
   def getVarInt32(self):
-    v = self.getVarInt64()
-    if v >= (1L << 31) or v < -(1L << 31):
+    b = self.get8()
+    if not (b & 128):
+      return b
+
+    result = long(0)
+    shift = 0
+
+    while 1:
+      result |= (long(b & 127) << shift)
+      shift += 7
+      if not (b & 128):
+        if result >= 0x10000000000000000L:
+          raise ProtocolBufferDecodeError, "corrupted"
+        break
+      if shift >= 64: raise ProtocolBufferDecodeError, "corrupted"
+      b = self.get8()
+
+    if result >= 0x8000000000000000L:
+      result -= 0x10000000000000000L
+    if result >= 0x80000000L or result < -0x80000000L:
       raise ProtocolBufferDecodeError, "corrupted"
-    return v
+    return result
 
   def getVarInt64(self):
     result = self.getVarUint64()
@@ -440,7 +490,7 @@
       b = self.get8()
       result |= (long(b & 127) << shift)
       shift += 7
-      if (b & 128) == 0:
+      if not (b & 128):
         if result >= (1L << 64): raise ProtocolBufferDecodeError, "corrupted"
         return result
     return result
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/net/proto/RawMessage.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+This is the Python counterpart to the RawMessage class defined in rawmessage.h.
+
+To use this, put the following line in your .proto file:
+
+python from google.net.proto.RawMessage import RawMessage
+
+"""
+
+__pychecker__ = 'no-callinit no-argsused'
+
+from google.net.proto import ProtocolBuffer
+
+class RawMessage(ProtocolBuffer.ProtocolMessage):
+  """
+  This is a special subclass of ProtocolMessage that doesn't interpret its data
+  in any way. Instead, it just stores it in a string.
+
+  See rawmessage.h for more details.
+  """
+
+  def __init__(self, initial=None):
+    self.__contents = ''
+    if initial is not None:
+      self.MergeFromString(initial)
+
+  def contents(self):
+    return self.__contents
+
+  def set_contents(self, contents):
+    self.__contents = contents
+
+  def Clear(self):
+    self.__contents = ''
+
+  def IsInitialized(self, debug_strs=None):
+    return 1
+
+  def __str__(self, prefix="", printElemNumber=0):
+    return prefix + self.DebugFormatString(self.__contents)
+
+  def OutputUnchecked(self, e):
+    e.putRawString(self.__contents)
+
+  def TryMerge(self, d):
+    self.__contents = d.getRawString()
+
+  def MergeFrom(self, pb):
+    assert pb is not self
+    if pb.__class__ != self.__class__:
+      return 0
+    self.__contents = pb.__contents
+    return 1
+
+  def Equals(self, pb):
+    return self.__contents == pb.__contents
+
+  def __eq__(self, other):
+    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
+
+  def __ne__(self, other):
+    return not (self == other)
+
+  def ByteSize(self):
+    return len(self.__contents)
+
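+# Usage sketch:
+#
+#   msg = RawMessage("opaque payload")
+#   msg.contents()                     # -> "opaque payload"
+#   RawMessage(msg.Encode()) == msg    # serialization round-trips
+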
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/AUTHORS	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,2 @@
+Benjamin Niemann <pink at odahoda dot de>: Main developer of Python target.
+Clinton Roy <clinton.roy at gmail dot com>: AST templates and runtime.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/LICENSE	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,26 @@
+[The "BSD licence"]
+Copyright (c) 2003-2006 Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+    derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/MANIFEST.in	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,2 @@
+include LICENSE AUTHORS ez_setup.py
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/OWNERS	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,7 @@
+# Primary owners
+arb
+bslatkin
+guido
+
+# Backup
+kgibbs
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/README	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,90 @@
+1) ABOUT
+========
+
+This is the Python package 'antlr3', which is required to use parsers created
+by the ANTLR3 tool. See <http://www.antlr.org/> for more information about
+ANTLR3.
+
+
+2) STATUS
+=========
+
+The Python target for ANTLR3 is still in beta. Documentation is lacking, some
+bits of the code are not yet done, and some functionality has not been tested yet.
+Also the API might change a bit - it currently mimics the Java implementation,
+but it may be made a bit more pythonic here and there.
+
+WARNING: Currently the runtime library for V3.1 is not compatible with
+recognizers generated by ANTLR V3.0.x. If you are an application developer,
+then the suggested way to solve this is to package the correct runtime with
+your application. Installing the runtime in the global site-packages directory
+may not be a good idea.
+It is still undetermined whether a future release of the V3.1 runtime will be
+compatible with V3.0.x recognizers, or whether future V3.2+ runtimes will be
+compatible with V3.1 recognizers.
+Sorry for the inconvenience.
+
+
+3) DOWNLOAD
+===========
+
+This runtime is part of the ANTLR distribution. The latest version can be found
+at <http://www.antlr.org/download.html>.
+
+If you are interested in the latest, most bleeding edge version, have a look at
+the perforce depot at <http://fisheye2.cenqua.com/browse/antlr>. There are
+tarballs ready to download, so you don't have to install the perforce client.
+
+
+4) INSTALLATION
+===============
+
+Just like any other Python package:
+$ python setup.py install
+
+See <http://docs.python.org/inst/> for more information.
+
+
+5) DOCUMENTATION
+================
+
+Documentation (as far as it exists) can be found in the wiki
+<http://www.antlr.org/wiki/display/ANTLR3/Antlr3PythonTarget>
+
+
+6) REPORTING BUGS
+=================
+
+Please send bug reports to the ANTLR mailing list 
+<http://www.antlr.org:8080/mailman/listinfo/antlr-interest> or
+<pink@odahoda.de>.
+
+Existing bugs may appear someday in the bugtracker:
+<http://www.antlr.org:8888/browse/ANTLR>
+
+
+7) HACKING
+==========
+
+Only the runtime package can be found here. There are also some StringTemplate
+files in 'src/org/antlr/codegen/templates/Python/' and some Java code in
+'src/org/antlr/codegen/PythonTarget.java' (of the main ANTLR3 source
+distribution).
+
+If there are no directories 'tests' and 'unittests' in 'runtime/Python', you
+should fetch the latest ANTLR3 version from the perforce depot. See section
+DOWNLOAD.
+You'll need java and ant in order to compile and use the tool.
+Be sure to properly set up your CLASSPATH.
+(FIXME: is there some generic information on how to build it yourself? I should
+point to it to avoid duplication.)
+
+You can then use the commands
+$ python setup.py unittest
+$ python setup.py functest
+to ensure that changes do not break existing behaviour.
+
+Please send patches to <pink@odahoda.de>. For larger code contributions you'll
+have to sign the "Developer's Certificate of Origin", which can be found on
+<http://www.antlr.org/license.html> or use the feedback form at
+<http://www.antlr.org/misc/feedback>.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/__init__.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,171 @@
+""" @package antlr3
+@brief ANTLR3 runtime package
+
+This module contains all support classes, which are needed to use recognizers
+generated by ANTLR3.
+
+@mainpage
+
+\note Please be warned that the line numbers in the API documentation do not
+match the real locations in the source code of the package. This is an
+unintended artifact of doxygen, which I could only convince to use the
+correct module names by concatenating all files from the package into a single
+module file...
+
+Here is a little overview over the most commonly used classes provided by
+this runtime:
+
+@section recognizers Recognizers
+
+These recognizers are base classes for the code which is generated by ANTLR3.
+
+- BaseRecognizer: Base class with common recognizer functionality.
+- Lexer: Base class for lexers.
+- Parser: Base class for parsers.
+- tree.TreeParser: Base class for %tree parser.
+
+@section streams Streams
+
+Each recognizer pulls its input from one of the stream classes below. Streams
+handle stuff like buffering, look-ahead and seeking.
+
+A character stream is usually the first element in the pipeline of a typical
+ANTLR3 application. It is used as the input for a Lexer.
+
+- ANTLRStringStream: Reads from a string object. The input should be a unicode
+  object, or ANTLR3 will have trouble decoding non-ascii data.
+- ANTLRFileStream: Opens a file and reads the contents, with optional character
+  decoding.
+- ANTLRInputStream: Reads the data from a file-like object, with optional
+  character decoding.
+
+A Parser needs a TokenStream as input (which in turn is usually fed by a
+Lexer):
+
+- CommonTokenStream: A basic and most commonly used TokenStream
+  implementation.
+- TokenRewriteStream: A modification of CommonTokenStream that allows the
+  stream to be altered (by the Parser). See the 'tweak' example for a use case.
+
+And tree.TreeParser finally fetches its input from a tree.TreeNodeStream:
+
+- tree.CommonTreeNodeStream: A basic and most commonly used tree.TreeNodeStream
+  implementation.
+  
+
+@section tokenstrees Tokens and Trees
+
+A Lexer emits Token objects which are usually buffered by a TokenStream. A
+Parser can build a Tree if the output=AST option has been set in the grammar.
+
+The runtime provides these Token implementations:
+
+- CommonToken: A basic and most commonly used Token implementation.
+- ClassicToken: A Token object as used in ANTLR 2.x, used for %tree
+  construction.
+
+Tree objects are wrappers for Token objects.
+
+- tree.CommonTree: A basic and most commonly used Tree implementation.
+
+A tree.TreeAdaptor is used by the parser to create tree.Tree objects for the
+input Token objects.
+
+- tree.CommonTreeAdaptor: A basic and most commonly used tree.TreeAdaptor
+implementation.
+
+
+@section Exceptions
+
+RecognitionExceptions are generated when a recognizer encounters incorrect
+or unexpected input.
+
+- RecognitionException
+  - MismatchedRangeException
+  - MismatchedSetException
+    - MismatchedNotSetException
+    .
+  - MismatchedTokenException
+  - MismatchedTreeNodeException
+  - NoViableAltException
+  - EarlyExitException
+  - FailedPredicateException
+  .
+.
+
+A tree.RewriteCardinalityException is raised when the parser hits a
+cardinality mismatch during AST construction. Although this is basically a
+bug in your grammar, it can only be detected at runtime.
+
+- tree.RewriteCardinalityException
+  - tree.RewriteEarlyExitException
+  - tree.RewriteEmptyStreamException
+  .
+.
+
+"""
+
+# tree.RewriteRuleElementStream
+# tree.RewriteRuleSubtreeStream
+# tree.RewriteRuleTokenStream
+# CharStream
+# DFA
+# TokenSource
+
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+__version__ = '3.1'
+
+def version_str_to_tuple(version_str):
+    import re
+    import sys
+
+    if version_str == 'HEAD':
+        return (sys.maxint, sys.maxint, sys.maxint, sys.maxint)
+
+    m = re.match(r'(\d+)\.(\d+)(\.(\d+))?(b(\d+))?', version_str)
+    if m is None:
+        raise ValueError("Bad version string %r" % version_str)
+
+    major = int(m.group(1))
+    minor = int(m.group(2))
+    patch = int(m.group(4) or 0)
+    beta = int(m.group(6) or sys.maxint)
+
+    return (major, minor, patch, beta)
+
+
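+# Examples (illustrative):
+#
+#   version_str_to_tuple("3.1")   == (3, 1, 0, sys.maxint)
+#   version_str_to_tuple("3.1b1") == (3, 1, 0, 1)
+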
+runtime_version_str = __version__
+runtime_version = version_str_to_tuple(runtime_version_str)
+
+
+from constants import *
+from dfa import *
+from exceptions import *
+from recognizers import *
+from streams import *
+from tokens import *
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/compat.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,48 @@
+"""Compatibility stuff"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+try:
+    set = set
+    frozenset = frozenset
+except NameError:
+    from sets import Set as set, ImmutableSet as frozenset
+
+
+try:
+    reversed = reversed
+except NameError:
+    def reversed(l):
+        l = l[:]
+        l.reverse()
+        return l
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/constants.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,57 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+EOF = -1
+
+## All tokens go to the parser (unless skip() is called in that rule)
+# on a particular "channel".  The parser tunes to a particular channel
+# so that whitespace etc... can go to the parser on a "hidden" channel.
+DEFAULT_CHANNEL = 0
+
+## Anything on different channel than DEFAULT_CHANNEL is not parsed
+# by parser.
+HIDDEN_CHANNEL = 99
+
+# Predefined token types
+EOR_TOKEN_TYPE = 1
+
+##
+# imaginary tree navigation type; traverse "get child" link
+DOWN = 2
+##
+#imaginary tree navigation type; finish with a child list
+UP = 3
+
+MIN_TOKEN_TYPE = UP+1
+
+INVALID_TOKEN_TYPE = 0
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/dfa.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,213 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+from antlr3.constants import EOF
+from antlr3.exceptions import NoViableAltException, BacktrackingFailed
+
+
+class DFA(object):
+    """@brief A DFA implemented as a set of transition tables.
+
+    Any state that has a semantic predicate edge is special; those states
+    are generated with if-then-else structures in a specialStateTransition()
+    which is generated by cyclicDFA template.
+    
+    """
+    
+    def __init__(
+        self,
+        recognizer, decisionNumber,
+        eot, eof, min, max, accept, special, transition
+        ):
+        ## Which recognizer encloses this DFA?  Needed to check backtracking
+        self.recognizer = recognizer
+
+        self.decisionNumber = decisionNumber
+        self.eot = eot
+        self.eof = eof
+        self.min = min
+        self.max = max
+        self.accept = accept
+        self.special = special
+        self.transition = transition
+
+
+    def predict(self, input):
+        """
+        From the input stream, predict what alternative will succeed
+        using this DFA (representing the covering regular approximation
+        to the underlying CFL).  Return an alternative number 1..n.
+        Throw an exception upon error.
+        """
+        mark = input.mark()
+        s = 0 # we always start at s0
+        try:
+            for _ in xrange(50000):
+                #print "***Current state = %d" % s
+                
+                specialState = self.special[s]
+                if specialState >= 0:
+                    #print "is special"
+                    s = self.specialStateTransition(specialState, input)
+                    if s == -1:
+                        self.noViableAlt(s, input)
+                        return 0
+                    input.consume()
+                    continue
+
+                if self.accept[s] >= 1:
+                    #print "accept state for alt %d" % self.accept[s]
+                    return self.accept[s]
+
+                # look for a normal char transition
+                c = input.LA(1)
+
+                #print "LA = %d (%r)" % (c, unichr(c) if c >= 0 else 'EOF')
+                #print "range = %d..%d" % (self.min[s], self.max[s])
+
+                if c >= self.min[s] and c <= self.max[s]:
+                    # move to next state
+                    snext = self.transition[s][c-self.min[s]]
+                    #print "in range, next state = %d" % snext
+                    
+                    if snext < 0:
+                        #print "not a normal transition"
+                        # was in range but not a normal transition
+                        # must check EOT, which is like the else clause.
+                        # eot[s]>=0 indicates that an EOT edge goes to another
+                        # state.
+                        if self.eot[s] >= 0: # EOT Transition to accept state?
+                            #print "EOT trans to accept state %d" % self.eot[s]
+                            
+                            s = self.eot[s]
+                            input.consume()
+                            # TODO: I had this as return accept[eot[s]]
+                            # which assumed here that the EOT edge always
+                            # went to an accept...faster to do this, but
+                            # what about predicated edges coming from EOT
+                            # target?
+                            continue
+
+                        #print "no viable alt"
+                        self.noViableAlt(s, input)
+                        return 0
+
+                    s = snext
+                    input.consume()
+                    continue
+
+                if self.eot[s] >= 0:
+                    #print "EOT to %d" % self.eot[s]
+                    
+                    s = self.eot[s]
+                    input.consume()
+                    continue
+
+                # EOF Transition to accept state?
+                if c == EOF and self.eof[s] >= 0:
+                    #print "EOF Transition to accept state %d" \
+                    #  % self.accept[self.eof[s]]
+                    return self.accept[self.eof[s]]
+
+                # not in range and not EOF/EOT, must be invalid symbol
+                self.noViableAlt(s, input)
+                return 0
+
+            else:
+                # for..else: the loop guard was exhausted without reaching
+                # an accept state or raising noViableAlt
+                raise RuntimeError("DFA bang!")
+            
+        finally:
+            input.rewind(mark)
+
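+    # Hedged usage sketch (an assumption, not part of the original source):
+    # generated recognizers build DFA instances from packed tables and call
+    # predict() inside a rule to choose an alternative, along the lines of
+    #
+    #     alt = self.dfa7.predict(self.input)  # `dfa7` is a hypothetical
+    #     if alt == 1:                         # decision attribute
+    #         ...  # match alternative 1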
+
+    def noViableAlt(self, s, input):
+        if self.recognizer._state.backtracking > 0:
+            raise BacktrackingFailed
+
+        nvae = NoViableAltException(
+            self.getDescription(),
+            self.decisionNumber,
+            s,
+            input
+            )
+
+        self.error(nvae)
+        raise nvae
+
+
+    def error(self, nvae):
+        """A hook for debugging interface"""
+        pass
+
+
+    def specialStateTransition(self, s, input):
+        return -1
+
+
+    def getDescription(self):
+        return "n/a"
+
+
+##     def specialTransition(self, state, symbol):
+##         return 0
+
+
+    def unpack(cls, string):
+        """@brief Unpack the runlength encoded table data.
+
+        Terence implemented packed table initializers, because Java has a
+        size restriction on .class files and the lookup tables can grow
+        pretty large. The generated JavaLexer.java of the Java.g example
+        would be about 15MB with uncompressed array initializers.
+
+        Python does not have any size restrictions, but the compilation of
+        such large source files seems to be pretty memory hungry. The memory
+        consumption of the Python process grew to >1.5GB when importing a
+        15MB lexer, eating all my swap space, and I was too impatient to see
+        if it could finish at all. With packed initializers that are unpacked
+        at import time of the lexer module, everything works like a charm.
+        
+        """
+        
+        ret = []
+        for i in range(len(string) / 2):
+            (n, v) = ord(string[i*2]), ord(string[i*2+1])
+
+            # Is there a bitwise operation to do this?
+            if v == 0xFFFF:
+                v = -1
+
+            ret += [v] * n
+
+        return ret
+    
+    unpack = classmethod(unpack)
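+
+# A hedged sketch of the run-length decoding above (hypothetical input, not
+# taken from a generated lexer): each (count, value) pair expands to `count`
+# copies of `value`, with the sentinel 0xFFFF mapped to -1.
+#
+#     >>> DFA.unpack(u"\u0003\u0004\u0001\uffff")
+#     [4, 4, 4, -1]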
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/dottreegen.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,210 @@
+""" @package antlr3.dottreegenerator
+@brief ANTLR3 runtime package, tree module
+
+This module contains all support classes for AST construction and tree parsers.
+
+"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+# lots of docstrings are missing; don't complain for now...
+# pylint: disable-msg=C0111
+
+from antlr3.tree import CommonTreeAdaptor
+import stringtemplate3
+
+class DOTTreeGenerator(object):
+    """
+    A utility class to generate DOT diagrams (graphviz) from
+    arbitrary trees.  You can pass in your own templates and
+    can pass in any kind of tree or use Tree interface method.
+    """
+
+    _treeST = stringtemplate3.StringTemplate(
+        template=(
+        "digraph {\n" +
+        "  ordering=out;\n" +
+        "  ranksep=.4;\n" +
+        "  node [shape=plaintext, fixedsize=true, fontsize=11, fontname=\"Courier\",\n" +
+        "        width=.25, height=.25];\n" +
+        "  edge [arrowsize=.5]\n" +
+        "  $nodes$\n" +
+        "  $edges$\n" +
+        "}\n")
+        )
+
+    _nodeST = stringtemplate3.StringTemplate(
+        template="$name$ [label=\"$text$\"];\n"
+        )
+
+    _edgeST = stringtemplate3.StringTemplate(
+        template="$parent$ -> $child$ // \"$parentText$\" -> \"$childText$\"\n"
+        )
+
+    def __init__(self):
+        ## Track node to number mapping so we can get proper node name back
+        self.nodeToNumberMap = {}
+
+        ## Track node number so we can get unique node names
+        self.nodeNumber = 0
+
+
+    def toDOT(self, tree, adaptor=None, treeST=_treeST, edgeST=_edgeST):
+        if adaptor is None:
+            adaptor = CommonTreeAdaptor()
+
+        treeST = treeST.getInstanceOf()
+
+        self.nodeNumber = 0
+        self.toDOTDefineNodes(tree, adaptor, treeST)
+
+        self.nodeNumber = 0
+        self.toDOTDefineEdges(tree, adaptor, treeST, edgeST)
+        return treeST
+
+
+    def toDOTDefineNodes(self, tree, adaptor, treeST, knownNodes=None):
+        if knownNodes is None:
+            knownNodes = set()
+
+        if tree is None:
+            return
+
+        n = adaptor.getChildCount(tree)
+        if n == 0:
+            # must have already dumped as child from previous
+            # invocation; do nothing
+            return
+
+        # define parent node
+        number = self.getNodeNumber(tree)
+        if number not in knownNodes:
+            parentNodeST = self.getNodeST(adaptor, tree)
+            treeST.setAttribute("nodes", parentNodeST)
+            knownNodes.add(number)
+
+        # for each child, do a "<unique-name> [label=text]" node def
+        for i in range(n):
+            child = adaptor.getChild(tree, i)
+            
+            number = self.getNodeNumber(child)
+            if number not in knownNodes:
+                nodeST = self.getNodeST(adaptor, child)
+                treeST.setAttribute("nodes", nodeST)
+                knownNodes.add(number)
+
+            self.toDOTDefineNodes(child, adaptor, treeST, knownNodes)
+
+
+    def toDOTDefineEdges(self, tree, adaptor, treeST, edgeST):
+        if tree is None:
+            return
+
+        n = adaptor.getChildCount(tree)
+        if n == 0:
+            # must have already dumped as child from previous
+            # invocation; do nothing
+            return
+
+        parentName = "n%d" % self.getNodeNumber(tree)
+
+        # for each child, do a parent -> child edge using unique node names
+        parentText = adaptor.getText(tree)
+        for i in range(n):
+            child = adaptor.getChild(tree, i)
+            childText = adaptor.getText(child)
+            childName = "n%d" % self.getNodeNumber(child)
+            edgeST = edgeST.getInstanceOf()
+            edgeST.setAttribute("parent", parentName)
+            edgeST.setAttribute("child", childName)
+            edgeST.setAttribute("parentText", parentText)
+            edgeST.setAttribute("childText", childText)
+            treeST.setAttribute("edges", edgeST)
+            self.toDOTDefineEdges(child, adaptor, treeST, edgeST)
+
+
+    def getNodeST(self, adaptor, t):
+        text = adaptor.getText(t)
+        nodeST = self._nodeST.getInstanceOf()
+        uniqueName = "n%d" % self.getNodeNumber(t)
+        nodeST.setAttribute("name", uniqueName)
+        if text is not None:
+            # escape double quotes so the DOT label stays well-formed
+            text = text.replace('"', r'\"')
+        nodeST.setAttribute("text", text)
+        return nodeST
+
+
+    def getNodeNumber(self, t):
+        try:
+            return self.nodeToNumberMap[t]
+        except KeyError:
+            self.nodeToNumberMap[t] = self.nodeNumber
+            self.nodeNumber += 1
+            return self.nodeNumber - 1
+
+
+def toDOT(tree, adaptor=None, treeST=DOTTreeGenerator._treeST, edgeST=DOTTreeGenerator._edgeST):
+    """
+    Generate DOT (graphviz) for a whole tree not just a node.
+    For example, 3+4*5 should generate:
+
+    digraph {
+        node [shape=plaintext, fixedsize=true, fontsize=11, fontname="Courier",
+            width=.4, height=.2];
+        edge [arrowsize=.7]
+        "+"->3
+        "+"->"*"
+        "*"->4
+        "*"->5
+    }
+
+    Return the ST, not a string, in case people want to alter it.

+    Takes a Tree interface object.

+    Example of invocation:
+
+        import antlr3
+        import antlr3.extras
+
+        input = antlr3.ANTLRInputStream(sys.stdin)
+        lex = TLexer(input)
+        tokens = antlr3.CommonTokenStream(lex)
+        parser = TParser(tokens)
+        tree = parser.e().tree
+        print tree.toStringTree()
+        st = antlr3.extras.toDOT(tree)
+        print st
+        
+    """
+
+    gen = DOTTreeGenerator()
+    return gen.toDOT(tree, adaptor, treeST, edgeST)
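+
+# Hedged usage sketch (an assumption, not part of the original module): the
+# returned StringTemplate can be rendered with str() and fed to graphviz:
+#
+#     st = toDOT(tree)
+#     open('tree.dot', 'w').write(str(st))
+#     # then render with: dot -Tpng tree.dot -o tree.png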
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/exceptions.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,364 @@
+"""ANTLR3 exception hierarchy"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+from antlr3.constants import INVALID_TOKEN_TYPE
+
+
+class BacktrackingFailed(Exception):
+    """@brief Raised to signal failed backtrack attempt"""
+
+    pass
+
+
+class RecognitionException(Exception):
+    """@brief The root of the ANTLR exception hierarchy.
+
+    To avoid English-only error messages and to generally make things
+    as flexible as possible, these exceptions are not created with strings,
+    but rather the information necessary to generate an error.  Then
+    the various reporting methods in Parser and Lexer can be overridden
+    to generate a localized error message.  For example, MismatchedToken
+    exceptions are built with the expected token type.
+    So, don't expect getMessage() to return anything.
+
+    Note that as of Java 1.4, you can access the stack trace, which means
+    that you can compute the complete trace of rules from the start symbol.
+    This gives you considerable context information with which to generate
+    useful error messages.
+
+    ANTLR generates code that throws exceptions upon recognition error and
+    also generates code to catch these exceptions in each rule.  If you
+    want to quit upon first error, you can turn off the automatic error
+    handling mechanism using rulecatch action, but you still need to
+    override methods mismatch and recoverFromMismatchSet.
+    
+    In general, the recognition exceptions can track where in a grammar a
+    problem occurred and/or what was the expected input.  While the parser
+    knows its state (such as the current input symbol and line info), that
+    state can change before the exception is reported, so the current token
+    index is computed and stored at exception time.  From this info, you can
+    perhaps print an entire line of input, not just a single token, for
+    example.  Better to just say the recognizer had a problem and then let
+    the parser figure out a fancy report.
+
+    """
+
+    def __init__(self, input=None):
+        Exception.__init__(self)
+
+        # What input stream did the error occur in?
+        self.input = None

+        # What is the index of the token/char we were looking at when the
+        # error occurred?
+        self.index = None

+        # The current Token when an error occurred.  Since not all streams
+        # can retrieve the ith Token, we have to track the Token object.
+        # For parsers.  Even when it's a tree parser, token might be set.
+        self.token = None

+        # If this is a tree parser exception, node is set to the node with
+        # the problem.
+        self.node = None

+        # The current char when an error occurred. For lexers.
+        self.c = None

+        # Track the line at which the error occurred in case this is
+        # generated from a lexer.  We need to track this since the
+        # unexpected char doesn't carry the line info.
+        self.line = None
+
+        self.charPositionInLine = None
+
+        # If you are parsing a tree node stream, you will encounter some
+        # imaginary nodes w/o line/col info.  We now search backwards looking
+        # for the most recent token with line/col info, but notify
+        # getErrorHeader() that the info is approximate.
+        self.approximateLineInfo = False
+
+        
+        if input is not None:
+            self.input = input
+            self.index = input.index()
+
+            # late import to avoid cyclic dependencies
+            from antlr3.streams import TokenStream, CharStream
+            from antlr3.tree import TreeNodeStream
+
+            if isinstance(self.input, TokenStream):
+                self.token = self.input.LT(1)
+                self.line = self.token.line
+                self.charPositionInLine = self.token.charPositionInLine
+
+            if isinstance(self.input, TreeNodeStream):
+                self.extractInformationFromTreeNodeStream(self.input)
+
+            else:
+                if isinstance(self.input, CharStream):
+                    self.c = self.input.LT(1)
+                    self.line = self.input.line
+                    self.charPositionInLine = self.input.charPositionInLine
+
+                else:
+                    self.c = self.input.LA(1)
+
+    def extractInformationFromTreeNodeStream(self, nodes):
+        from antlr3.tree import Tree, CommonTree
+        from antlr3.tokens import CommonToken
+        
+        self.node = nodes.LT(1)
+        adaptor = nodes.adaptor
+        payload = adaptor.getToken(self.node)
+        if payload is not None:
+            self.token = payload
+            if payload.line <= 0:
+                # imaginary node; no line/pos info; scan backwards
+                i = -1
+                priorNode = nodes.LT(i)
+                while priorNode is not None:
+                    priorPayload = adaptor.getToken(priorNode)
+                    if priorPayload is not None and priorPayload.line > 0:
+                        # we found the most recent real line / pos info
+                        self.line = priorPayload.line
+                        self.charPositionInLine = priorPayload.charPositionInLine
+                        self.approximateLineInfo = True
+                        break
+                    
+                    i -= 1
+                    priorNode = nodes.LT(i)
+                    
+            else: # node created from real token
+                self.line = payload.line
+                self.charPositionInLine = payload.charPositionInLine
+                
+        elif isinstance(self.node, Tree):
+            self.line = self.node.line
+            self.charPositionInLine = self.node.charPositionInLine
+            if isinstance(self.node, CommonTree):
+                self.token = self.node.token
+
+        else:
+            type = adaptor.getType(self.node)
+            text = adaptor.getText(self.node)
+            self.token = CommonToken(type=type, text=text)
+
+     
+    def getUnexpectedType(self):
+        """Return the token type or char of the unexpected input element"""
+
+        from antlr3.streams import TokenStream
+        from antlr3.tree import TreeNodeStream
+
+        if isinstance(self.input, TokenStream):
+            return self.token.type
+
+        elif isinstance(self.input, TreeNodeStream):
+            adaptor = self.input.treeAdaptor
+            return adaptor.getType(self.node)
+
+        else:
+            return self.c
+
+    unexpectedType = property(getUnexpectedType)
+    
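+
+# Hedged usage sketch (an assumption): client code usually relies on the
+# recognizer's own error reporting, but the subclasses below can also be
+# caught explicitly, e.g.
+#
+#     try:
+#         parser.someRule()          # someRule is a hypothetical rule method
+#     except RecognitionException, exc:
+#         print "line %s:%s %r" % (exc.line, exc.charPositionInLine, exc)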
+
+class MismatchedTokenException(RecognitionException):
+    """@brief A mismatched char or Token or tree node."""
+    
+    def __init__(self, expecting, input):
+        RecognitionException.__init__(self, input)
+        self.expecting = expecting
+        
+
+    def __str__(self):
+        #return "MismatchedTokenException("+self.expecting+")"
+        return "MismatchedTokenException(%r!=%r)" % (
+            self.getUnexpectedType(), self.expecting
+            )
+    __repr__ = __str__
+
+
+class UnwantedTokenException(MismatchedTokenException):
+    """An extra token while parsing a TokenStream"""
+
+    def getUnexpectedToken(self):
+        return self.token
+
+
+    def __str__(self):
+        exp = ", expected %s" % self.expecting
+        if self.expecting == INVALID_TOKEN_TYPE:
+            exp = ""
+
+        if self.token is None:
+            return "UnwantedTokenException(found=%s%s)" % (None, exp)
+
+        return "UnwantedTokenException(found=%s%s)" % (self.token.text, exp)
+    __repr__ = __str__
+
+
+class MissingTokenException(MismatchedTokenException):
+    """
+    We were expecting a token but it's not found.  The current token
+    is actually what we wanted next.
+    """
+
+    def __init__(self, expecting, input, inserted):
+        MismatchedTokenException.__init__(self, expecting, input)
+
+        self.inserted = inserted
+
+
+    def getMissingType(self):
+        return self.expecting
+
+
+    def __str__(self):
+        if self.inserted is not None and self.token is not None:
+            return "MissingTokenException(inserted %r at %r)" % (
+                self.inserted, self.token.text)
+
+        if self.token is not None:
+            return "MissingTokenException(at %r)" % self.token.text
+
+        return "MissingTokenException"
+    __repr__ = __str__
+
+
+class MismatchedRangeException(RecognitionException):
+    """@brief The next token does not match a range of expected types."""
+
+    def __init__(self, a, b, input):
+        RecognitionException.__init__(self, input)
+
+        self.a = a
+        self.b = b
+        
+
+    def __str__(self):
+        return "MismatchedRangeException(%r not in [%r..%r])" % (
+            self.getUnexpectedType(), self.a, self.b
+            )
+    __repr__ = __str__
+    
+
+class MismatchedSetException(RecognitionException):
+    """@brief The next token does not match a set of expected types."""
+
+    def __init__(self, expecting, input):
+        RecognitionException.__init__(self, input)
+
+        self.expecting = expecting
+        
+
+    def __str__(self):
+        return "MismatchedSetException(%r not in %r)" % (
+            self.getUnexpectedType(), self.expecting
+            )
+    __repr__ = __str__
+
+
+class MismatchedNotSetException(MismatchedSetException):
+    """@brief Used for remote debugger deserialization"""
+    
+    def __str__(self):
+        return "MismatchedNotSetException(%r!=%r)" % (
+            self.getUnexpectedType(), self.expecting
+            )
+    __repr__ = __str__
+
+
+class NoViableAltException(RecognitionException):
+    """@brief Unable to decide which alternative to choose."""
+
+    def __init__(
+        self, grammarDecisionDescription, decisionNumber, stateNumber, input
+        ):
+        RecognitionException.__init__(self, input)
+
+        self.grammarDecisionDescription = grammarDecisionDescription
+        self.decisionNumber = decisionNumber
+        self.stateNumber = stateNumber
+
+
+    def __str__(self):
+        return "NoViableAltException(%r!=[%r])" % (
+            self.unexpectedType, self.grammarDecisionDescription
+            )
+    __repr__ = __str__
+    
+
+class EarlyExitException(RecognitionException):
+    """@brief The recognizer did not match anything for a (..)+ loop."""
+
+    def __init__(self, decisionNumber, input):
+        RecognitionException.__init__(self, input)
+
+        self.decisionNumber = decisionNumber
+
+
+class FailedPredicateException(RecognitionException):
+    """@brief A semantic predicate failed during validation.
+
+    Validation of predicates
+    occurs while normally parsing the alternative, just like matching a
+    token.  Disambiguating predicate evaluation occurs when we hoist a
+    predicate into a prediction decision.
+    """
+
+    def __init__(self, input, ruleName, predicateText):
+        RecognitionException.__init__(self, input)
+        
+        self.ruleName = ruleName
+        self.predicateText = predicateText
+
+
+    def __str__(self):
+        return "FailedPredicateException("+self.ruleName+",{"+self.predicateText+"}?)"
+    __repr__ = __str__
+    
+
+class MismatchedTreeNodeException(RecognitionException):
+    """@brief The next tree mode does not match the expected type."""
+
+    def __init__(self, expecting, input):
+        RecognitionException.__init__(self, input)
+        
+        self.expecting = expecting
+
+    def __str__(self):
+        return "MismatchedTreeNodeException(%r!=%r)" % (
+            self.getUnexpectedType(), self.expecting
+            )
+    __repr__ = __str__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/extras.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,47 @@
+""" @package antlr3.dottreegenerator
+@brief ANTLR3 runtime package, tree module
+
+This module contains all support classes for AST construction and tree parsers.
+
+"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+# lots of docstrings are missing; don't complain for now...
+# pylint: disable-msg=C0111
+
+from treewizard import TreeWizard
+
+try:
+    from antlr3.dottreegen import toDOT
+except ImportError, exc:
+    def toDOT(*args, **kwargs):
+        raise exc
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/main.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,289 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+
+import sys
+import optparse
+
+import antlr3
+
+
+class _Main(object):
+    def __init__(self):
+        self.stdin = sys.stdin
+        self.stdout = sys.stdout
+        self.stderr = sys.stderr
+
+        
+    def parseOptions(self, argv):
+        optParser = optparse.OptionParser()
+        optParser.add_option(
+            "--encoding",
+            action="store",
+            type="string",
+            dest="encoding"
+            )
+        optParser.add_option(
+            "--input",
+            action="store",
+            type="string",
+            dest="input"
+            )
+        optParser.add_option(
+            "--interactive", "-i",
+            action="store_true",
+            dest="interactive"
+            )
+        optParser.add_option(
+            "--no-output",
+            action="store_true",
+            dest="no_output"
+            )
+        optParser.add_option(
+            "--profile",
+            action="store_true",
+            dest="profile"
+            )
+        optParser.add_option(
+            "--hotshot",
+            action="store_true",
+            dest="hotshot"
+            )
+
+        self.setupOptions(optParser)
+        
+        return optParser.parse_args(argv[1:])
+
+
+    def setupOptions(self, optParser):
+        pass
+
+
+    def execute(self, argv):
+        options, args = self.parseOptions(argv)
+
+        self.setUp(options)
+        
+        if options.interactive:
+            while True:
+                try:
+                    input = raw_input(">>> ")
+                except (EOFError, KeyboardInterrupt):
+                    self.stdout.write("\nBye.\n")
+                    break
+            
+                inStream = antlr3.ANTLRStringStream(input)
+                self.parseStream(options, inStream)
+            
+        else:
+            if options.input is not None:
+                inStream = antlr3.ANTLRStringStream(options.input)
+
+            elif len(args) == 1 and args[0] != '-':
+                inStream = antlr3.ANTLRFileStream(
+                    args[0], encoding=options.encoding
+                    )
+
+            else:
+                inStream = antlr3.ANTLRInputStream(
+                    self.stdin, encoding=options.encoding
+                    )
+
+            if options.profile:
+                try:
+                    import cProfile as profile
+                except ImportError:
+                    import profile
+
+                profile.runctx(
+                    'self.parseStream(options, inStream)',
+                    globals(),
+                    locals(),
+                    'profile.dat'
+                    )
+
+                import pstats
+                stats = pstats.Stats('profile.dat')
+                stats.strip_dirs()
+                stats.sort_stats('time')
+                stats.print_stats(100)
+
+            elif options.hotshot:
+                import hotshot
+
+                profiler = hotshot.Profile('hotshot.dat')
+                profiler.runctx(
+                    'self.parseStream(options, inStream)',
+                    globals(),
+                    locals()
+                    )
+
+            else:
+                self.parseStream(options, inStream)
+
+
+    def setUp(self, options):
+        pass
+
+    
+    def parseStream(self, options, inStream):
+        raise NotImplementedError
+
+
+    def write(self, options, text):
+        if not options.no_output:
+            self.stdout.write(text)
+
+
+    def writeln(self, options, text):
+        self.write(options, text + '\n')
+
+
+class LexerMain(_Main):
+    def __init__(self, lexerClass):
+        _Main.__init__(self)
+
+        self.lexerClass = lexerClass
+        
+    
+    def parseStream(self, options, inStream):
+        lexer = self.lexerClass(inStream)
+        for token in lexer:
+            self.writeln(options, str(token))
+
+
+class ParserMain(_Main):
+    def __init__(self, lexerClassName, parserClass):
+        _Main.__init__(self)
+
+        self.lexerClassName = lexerClassName
+        self.lexerClass = None
+        self.parserClass = parserClass
+        
+    
+    def setupOptions(self, optParser):
+        optParser.add_option(
+            "--lexer",
+            action="store",
+            type="string",
+            dest="lexerClass",
+            default=self.lexerClassName
+            )
+        optParser.add_option(
+            "--rule",
+            action="store",
+            type="string",
+            dest="parserRule"
+            )
+
+
+    def setUp(self, options):
+        lexerMod = __import__(options.lexerClass)
+        self.lexerClass = getattr(lexerMod, options.lexerClass)
+
+        
+    def parseStream(self, options, inStream):
+        lexer = self.lexerClass(inStream)
+        tokenStream = antlr3.CommonTokenStream(lexer)
+        parser = self.parserClass(tokenStream)
+        result = getattr(parser, options.parserRule)()
+        if result is not None:
+            if hasattr(result, 'tree'):
+                if result.tree is not None:
+                    self.writeln(options, result.tree.toStringTree())
+            else:
+                self.writeln(options, repr(result))
+
+
+class WalkerMain(_Main):
+    def __init__(self, walkerClass):
+        _Main.__init__(self)
+
+        self.lexerClass = None
+        self.parserClass = None
+        self.walkerClass = walkerClass
+        
+    
+    def setupOptions(self, optParser):
+        optParser.add_option(
+            "--lexer",
+            action="store",
+            type="string",
+            dest="lexerClass",
+            default=None
+            )
+        optParser.add_option(
+            "--parser",
+            action="store",
+            type="string",
+            dest="parserClass",
+            default=None
+            )
+        optParser.add_option(
+            "--parser-rule",
+            action="store",
+            type="string",
+            dest="parserRule",
+            default=None
+            )
+        optParser.add_option(
+            "--rule",
+            action="store",
+            type="string",
+            dest="walkerRule"
+            )
+
+
+    def setUp(self, options):
+        lexerMod = __import__(options.lexerClass)
+        self.lexerClass = getattr(lexerMod, options.lexerClass)
+        parserMod = __import__(options.parserClass)
+        self.parserClass = getattr(parserMod, options.parserClass)
+
+        
+    def parseStream(self, options, inStream):
+        lexer = self.lexerClass(inStream)
+        tokenStream = antlr3.CommonTokenStream(lexer)
+        parser = self.parserClass(tokenStream)
+        result = getattr(parser, options.parserRule)()
+        if result is not None:
+            assert hasattr(result, 'tree'), "Parser did not return an AST"
+            nodeStream = antlr3.tree.CommonTreeNodeStream(result.tree)
+            nodeStream.setTokenStream(tokenStream)
+            walker = self.walkerClass(nodeStream)
+            result = getattr(walker, options.walkerRule)()
+            if result is not None:
+                if hasattr(result, 'tree'):
+                    self.writeln(options, result.tree.toStringTree())
+                else:
+                    self.writeln(options, repr(result))
+
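+# Hedged usage sketch (an assumption about generated code, not defined in
+# this module): ANTLR-generated parser modules typically wrap these helpers
+# in a main() of their own, e.g.
+#
+#     from antlr3.main import ParserMain
+#     main = ParserMain("TLexer", TParser)  # TLexer/TParser are hypothetical
+#     main.execute(sys.argv)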
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/recognizers.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,1511 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+import sys
+import inspect
+
+from antlr3 import runtime_version, runtime_version_str
+from antlr3.constants import DEFAULT_CHANNEL, HIDDEN_CHANNEL, EOF, \
+     EOR_TOKEN_TYPE, INVALID_TOKEN_TYPE
+from antlr3.exceptions import RecognitionException, MismatchedTokenException, \
+     MismatchedRangeException, MismatchedTreeNodeException, \
+     NoViableAltException, EarlyExitException, MismatchedSetException, \
+     MismatchedNotSetException, FailedPredicateException, \
+     BacktrackingFailed, UnwantedTokenException, MissingTokenException
+from antlr3.tokens import CommonToken, EOF_TOKEN, SKIP_TOKEN
+from antlr3.compat import set, frozenset, reversed
+
+
+class RecognizerSharedState(object):
+    """
+    The set of fields needed by an abstract recognizer to recognize input
+    and recover from errors etc...  As a separate state object, it can be
+    shared among multiple grammars; e.g., when one grammar imports another.
+
+    These fields are publicly visible, but the actual state pointer per
+    parser is protected.
+    """
+
+    def __init__(self):
+        # Track the set of token types that can follow any rule invocation.
+        # Stack grows upwards.
+        self.following = []
+
+        # This is true when we see an error and before having successfully
+        # matched a token.  Prevents generation of more than one error message
+        # per error.
+        self.errorRecovery = False
+
+        # The index into the input stream where the last error occurred.
+        # This is used to prevent infinite loops where an error is found
+        # but no token is consumed during recovery... another error is found,
+        # ad nauseam.  This is a failsafe mechanism to guarantee that at
+        # least one token/tree node is consumed for two errors.
+        self.lastErrorIndex = -1
+
+        # If 0, no backtracking is going on.  Safe to exec actions etc...
+        # If >0 then it's the level of backtracking.
+        self.backtracking = 0
+
+        # An array[size num rules] of Map<Integer,Integer> that tracks
+        # the stop token index for each rule.  ruleMemo[ruleIndex] is
+        # the memoization table for ruleIndex.  For key ruleStartIndex, you
+        # get back the stop token for associated rule or MEMO_RULE_FAILED.
+        #
+        # This is only used if rule memoization is on (which it is by default).
+        self.ruleMemo = None
+
+        ## Did the recognizer encounter a syntax error?  Track how many.
+        self.syntaxErrors = 0
+
+
+        # LEXER FIELDS (must be in same state object to avoid casting
+        # constantly in generated code and Lexer object) :(
+
+
+        ## The goal of all lexer rules/methods is to create a token object.
+        # This is an instance variable as multiple rules may collaborate to
+        # create a single token.  nextToken will return this object after
+        # matching lexer rule(s).  If you subclass to allow multiple token
+        # emissions, then set this to the last token to be matched or
+        # something nonnull so that the auto token emit mechanism will not
+        # emit another token.
+        self.token = None
+
+        ## What character index in the stream did the current token start at?
+        # Needed, for example, to get the text for current token.  Set at
+        # the start of nextToken.
+        self.tokenStartCharIndex = -1
+
+        ## The line on which the first character of the token resides
+        self.tokenStartLine = None
+
+        ## The character position of first character within the line
+        self.tokenStartCharPositionInLine = None
+
+        ## The channel number for the current token
+        self.channel = None
+
+        ## The token type for the current token
+        self.type = None
+
+        ## You can set the text for the current token to override what is in
+        # the input char buffer.  Use setText() or can set this instance var.
+        self.text = None
+        
+
+class BaseRecognizer(object):
+    """
+    @brief Common recognizer functionality.
+    
+    A generic recognizer that can handle recognizers generated from
+    lexer, parser, and tree grammars.  This is all the parsing
+    support code essentially; most of it is error recovery stuff and
+    backtracking.
+    """
+
+    MEMO_RULE_FAILED = -2
+    MEMO_RULE_UNKNOWN = -1
+
+    # copies from Token object for convenience in actions
+    DEFAULT_TOKEN_CHANNEL = DEFAULT_CHANNEL
+
+    # for convenience in actions
+    HIDDEN = HIDDEN_CHANNEL
+
+    # overridden by generated subclasses
+    tokenNames = None
+
+    # The antlr_version attribute was introduced in 3.1. If it is not
+    # overwritten in the generated recognizer, we assume a default of 3.0.1.
+    antlr_version = (3, 0, 1, 0)
+    antlr_version_str = "3.0.1"
+
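+    # Hedged sketch (an assumption): a generated recognizer overwrites the
+    # attributes above, e.g.
+    #
+    #     class TParser(Parser):             # hypothetical generated class
+    #         antlr_version = (3, 1, 0, 0)
+    #         antlr_version_str = "3.1"
+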
+    def __init__(self, state=None):
+        # Input stream of the recognizer. Must be initialized by a subclass.
+        self.input = None
+
+        ## State of a lexer, parser, or tree parser are collected into a state
+        # object so the state can be shared.  This sharing is needed to
+        # have one grammar import others and share same error variables
+        # and other state variables.  It's a kind of explicit multiple
+        # inheritance via delegation of methods and shared state.
+        if state is None:
+            state = RecognizerSharedState()
+        self._state = state
+
+        if self.antlr_version > runtime_version:
+            raise RuntimeError(
+                "ANTLR version mismatch: "
+                "The recognizer has been generated by V%s, but this runtime "
+                "is V%s. Please use the V%s runtime or higher."
+                % (self.antlr_version_str,
+                   runtime_version_str,
+                   self.antlr_version_str))
+        elif (self.antlr_version < (3, 1, 0, 0) and
+              self.antlr_version != runtime_version):
+            # FIXME: make the runtime compatible with 3.0.1 codegen
+            # and remove this block.
+            raise RuntimeError(
+                "ANTLR version mismatch: "
+                "The recognizer has been generated by V%s, but this runtime "
+                "is V%s. Please use the V%s runtime."
+                % (self.antlr_version_str,
+                   runtime_version_str,
+                   self.antlr_version_str))
+
+    # this one only exists to shut up pylint :(
+    def setInput(self, input):
+        self.input = input
+
+        
+    def reset(self):
+        """
+        reset the parser's state; subclasses must rewinds the input stream
+        """
+        
+        # wack everything related to error recovery
+        if self._state is None:
+            # no shared state work to do
+            return
+        
+        self._state.following = []
+        self._state.errorRecovery = False
+        self._state.lastErrorIndex = -1
+        self._state.syntaxErrors = 0
+        # wack everything related to backtracking and memoization
+        self._state.backtracking = 0
+        if self._state.ruleMemo is not None:
+            self._state.ruleMemo = {}
+
+
+    def match(self, input, ttype, follow):
+        """
+        Match current input symbol against ttype.  Attempt
+        single token insertion or deletion error recovery.  If
+        that fails, throw MismatchedTokenException.
+
+        To turn off single token insertion or deletion error
+        recovery, override mismatchRecover() and have it call
+        plain mismatch(), which does not recover.  Then any error
+        in a rule will cause an exception and immediate exit from
+        rule.  Rule would recover by resynchronizing to the set of
+        symbols that can follow rule ref.
+        """
+        
+        matchedSymbol = self.getCurrentInputSymbol(input)
+        if self.input.LA(1) == ttype:
+            self.input.consume()
+            self._state.errorRecovery = False
+            return matchedSymbol
+
+        if self._state.backtracking > 0:
+            # FIXME: need to return matchedSymbol here as well. damn!!
+            raise BacktrackingFailed
+
+        matchedSymbol = self.recoverFromMismatchedToken(input, ttype, follow)
+        return matchedSymbol
+
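+    # Hedged sketch (an assumption, following the docstring above; note that
+    # mismatchRecover() only appears as commented-out code in this version of
+    # the runtime): to turn off single token insertion/deletion, route
+    # recovery through plain mismatch(), which raises instead of recovering:
+    #
+    #     class StrictParser(TParser):       # TParser is hypothetical
+    #         def mismatchRecover(self, input, ttype, follow):
+    #             self.mismatch(input, ttype, follow)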
+
+    def matchAny(self, input):
+        """Match the wildcard: in a symbol"""
+
+        self._state.errorRecovery = False
+        self.input.consume()
+
+
+    def mismatchIsUnwantedToken(self, input, ttype):
+        return input.LA(2) == ttype
+
+
+    def mismatchIsMissingToken(self, input, follow):
+        if follow is None:
+            # we have no information about the follow; we can only consume
+            # a single token and hope for the best
+            return False
+        
+        # compute what can follow this grammar element reference
+        if EOR_TOKEN_TYPE in follow:
+            if len(self._state.following) > 0:
+                # remove EOR if we're not the start symbol
+                follow = follow - set([EOR_TOKEN_TYPE])
+
+            viableTokensFollowingThisRule = self.computeContextSensitiveRuleFOLLOW()
+            follow = follow | viableTokensFollowingThisRule
+
+        # if current token is consistent with what could come after set
+        # then we know we're missing a token; error recovery is free to
+        # "insert" the missing token
+        if input.LA(1) in follow or EOR_TOKEN_TYPE in follow:
+            return True
+
+        return False
+
+
+    def mismatch(self, input, ttype, follow):
+        """
+        Factor out what to do upon token mismatch so tree parsers can behave
+        differently.  Override and call mismatchRecover(input, ttype, follow)
+        to get single token insertion and deletion.  Use this to turn off
+        single token insertion and deletion.  Override mismatchRecover
+        to call this instead.
+        """
+
+        if self.mismatchIsUnwantedToken(input, ttype):
+            raise UnwantedTokenException(ttype, input)
+
+        elif self.mismatchIsMissingToken(input, follow):
+            raise MissingTokenException(ttype, input, None)
+
+        raise MismatchedTokenException(ttype, input)
+
+
+##     def mismatchRecover(self, input, ttype, follow):
+##         if self.mismatchIsUnwantedToken(input, ttype):
+##             mte = UnwantedTokenException(ttype, input)
+
+##         elif self.mismatchIsMissingToken(input, follow):
+##             mte = MissingTokenException(ttype, input)
+
+##         else:
+##             mte = MismatchedTokenException(ttype, input)
+
+##         self.recoverFromMismatchedToken(input, mte, ttype, follow)
+
+
+    def reportError(self, e):
+        """Report a recognition problem.
+            
+        This method sets errorRecovery to indicate the parser is recovering,
+        not parsing.  Once in recovery mode, no errors are generated.
+        To get out of recovery mode, the parser must successfully match
+        a token (after a resync).  So it will go:
+
+        1. error occurs
+        2. enter recovery mode, report error
+        3. consume until token found in resynch set
+        4. try to resume parsing
+        5. next match() will reset errorRecovery mode
+
+        If you override, make sure to update syntaxErrors if you care about
+        that.
+        
+        """
+        
+        # if we've already reported an error and have not matched a token
+        # yet successfully, don't report any errors.
+        if self._state.errorRecovery:
+            return
+
+        self._state.syntaxErrors += 1 # don't count spurious
+        self._state.errorRecovery = True
+
+        self.displayRecognitionError(self.tokenNames, e)
+
+
+    def displayRecognitionError(self, tokenNames, e):
+        hdr = self.getErrorHeader(e)
+        msg = self.getErrorMessage(e, tokenNames)
+        self.emitErrorMessage(hdr+" "+msg)
+
+
+    def getErrorMessage(self, e, tokenNames):
+        """
+        What error message should be generated for the various
+        exception types?
+        
+        Not very object-oriented code, but I like having all error message
+        generation within one method rather than spread among all of the
+        exception classes. This also makes it much easier for the exception
+        handling because the exception classes do not have to have pointers
+        back to this object to access utility routines and so on. Also,
+        changing the message for an exception type would be difficult because
+        you would have to subclass the exception and then somehow get ANTLR
+        to make those kinds of exception objects instead of the default.
+        This looks weird, but trust me--it makes the most sense in terms
+        of flexibility.
+
+        For grammar debugging, you will want to override this to add
+        more information such as the stack frame with
+        getRuleInvocationStack(e, this.getClass().getName()) and,
+        for no viable alts, the decision description and state etc...
+
+        Override this to change the message generated for one or more
+        exception types.
+        """
+
+        if isinstance(e, UnwantedTokenException):
+            tokenName = "<unknown>"
+            if e.expecting == EOF:
+                tokenName = "EOF"
+
+            else:
+                tokenName = self.tokenNames[e.expecting]
+
+            msg = "extraneous input %s expecting %s" % (
+                self.getTokenErrorDisplay(e.getUnexpectedToken()),
+                tokenName
+                )
+
+        elif isinstance(e, MissingTokenException):
+            tokenName = "<unknown>"
+            if e.expecting == EOF:
+                tokenName = "EOF"
+
+            else:
+                tokenName = self.tokenNames[e.expecting]
+
+            msg = "missing %s at %s" % (
+                tokenName, self.getTokenErrorDisplay(e.token)
+                )
+
+        elif isinstance(e, MismatchedTokenException):
+            tokenName = "<unknown>"
+            if e.expecting == EOF:
+                tokenName = "EOF"
+            else:
+                tokenName = self.tokenNames[e.expecting]
+
+            msg = "mismatched input " \
+                  + self.getTokenErrorDisplay(e.token) \
+                  + " expecting " \
+                  + tokenName
+
+        elif isinstance(e, MismatchedTreeNodeException):
+            tokenName = "<unknown>"
+            if e.expecting == EOF:
+                tokenName = "EOF"
+            else:
+                tokenName = self.tokenNames[e.expecting]
+
+            msg = "mismatched tree node: %s expecting %s" \
+                  % (e.node, tokenName)
+
+        elif isinstance(e, NoViableAltException):
+            msg = "no viable alternative at input " \
+                  + self.getTokenErrorDisplay(e.token)
+
+        elif isinstance(e, EarlyExitException):
+            msg = "required (...)+ loop did not match anything at input " \
+                  + self.getTokenErrorDisplay(e.token)
+
+        elif isinstance(e, MismatchedSetException):
+            msg = "mismatched input " \
+                  + self.getTokenErrorDisplay(e.token) \
+                  + " expecting set " \
+                  + repr(e.expecting)
+
+        elif isinstance(e, MismatchedNotSetException):
+            msg = "mismatched input " \
+                  + self.getTokenErrorDisplay(e.token) \
+                  + " expecting set " \
+                  + repr(e.expecting)
+
+        elif isinstance(e, FailedPredicateException):
+            msg = "rule " \
+                  + e.ruleName \
+                  + " failed predicate: {" \
+                  + e.predicateText \
+                  + "}?"
+
+        else:
+            msg = str(e)
+
+        return msg
+    
+
+    def getNumberOfSyntaxErrors(self):
+        """
+        Get number of recognition errors (lexer, parser, tree parser).  Each
+        recognizer tracks its own number.  So parser and lexer each have
+        separate count.  Does not count the spurious errors found between
+        an error and next valid token match
+
+        See also reportError()
+	"""
+        return self._state.syntaxErrors
+
+
+    def getErrorHeader(self, e):
+        """
+        What is the error header, normally line/character position information?
+        """
+        
+        return "line %d:%d" % (e.line, e.charPositionInLine)
+
+
+    def getTokenErrorDisplay(self, t):
+        """
+        How should a token be displayed in an error message? The default
+        is to display just the text, but during development you might
+        want to have a lot of information spit out.  Override in that case
+        to use t.toString() (which, for CommonToken, dumps everything about
+        the token). This is better than forcing you to override a method in
+        your token objects because you don't have to go modify your lexer
+        so that it creates a new Java type.
+        """
+        
+        s = t.text
+        if s is None:
+            if t.type == EOF:
+                s = "<EOF>"
+            else:
+                s = "<"+t.type+">"
+
+        return repr(s)
+    
+
+    def emitErrorMessage(self, msg):
+        """Override this method to change where error messages go"""
+        sys.stderr.write(msg + '\n')
+
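+    # Hedged customization sketch (an assumption): a generated subclass can
+    # redirect error output by overriding the hook above, e.g.
+    #
+    #     class CollectingParser(TParser):   # TParser is hypothetical
+    #         def __init__(self, *args):
+    #             TParser.__init__(self, *args)
+    #             self.messages = []
+    #         def emitErrorMessage(self, msg):
+    #             self.messages.append(msg)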
+
+    def recover(self, input, re):
+        """
+        Recover from an error found on the input stream.  This is
+        for NoViableAlt and mismatched symbol exceptions.  If you enable
+        single token insertion and deletion, this will usually not
+        handle mismatched symbol exceptions but there could be a mismatched
+        token that the match() routine could not recover from.
+        """
+        
+        # PROBLEM? what if input stream is not the same as last time
+        # perhaps make lastErrorIndex a member of input
+        if self._state.lastErrorIndex == input.index():
+            # uh oh, another error at same token index; must be a case
+            # where LT(1) is in the recovery token set so nothing is
+            # consumed; consume a single token so at least to prevent
+            # an infinite loop; this is a failsafe.
+            input.consume()
+
+        self._state.lastErrorIndex = input.index()
+        followSet = self.computeErrorRecoverySet()
+        
+        self.beginResync()
+        self.consumeUntil(input, followSet)
+        self.endResync()
+
+
+    def beginResync(self):
+        """
+        A hook to listen in on the token consumption during error recovery.
+        The DebugParser subclasses this to fire events to the listener.
+        """
+
+        pass
+
+
+    def endResync(self):
+        """
+        A hook to listen in on the token consumption during error recovery.
+        The DebugParser subclasses this to fire events to the listener.
+        """
+
+        pass
+
+
+    def computeErrorRecoverySet(self):
+        """
+        Compute the error recovery set for the current rule.  During
+        rule invocation, the parser pushes the set of tokens that can
+        follow that rule reference on the stack; this amounts to
+        computing FIRST of what follows the rule reference in the
+        enclosing rule. This local follow set only includes tokens
+        from within the rule; i.e., the FIRST computation done by
+        ANTLR stops at the end of a rule.
+
+        EXAMPLE
+
+        When you find a "no viable alt exception", the input is not
+        consistent with any of the alternatives for rule r.  The best
+        thing to do is to consume tokens until you see something that
+        can legally follow a call to r *or* any rule that called r.
+        You don't want the exact set of viable next tokens because the
+        input might just be missing a token--you might consume the
+        rest of the input looking for one of the missing tokens.
+
+        Consider grammar:
+
+        a : '[' b ']'
+          | '(' b ')'
+          ;
+        b : c '^' INT ;
+        c : ID
+          | INT
+          ;
+
+        At each rule invocation, the set of tokens that could follow
+        that rule is pushed on a stack.  Here are the various "local"
+        follow sets:
+
+        FOLLOW(b1_in_a) = FIRST(']') = ']'
+        FOLLOW(b2_in_a) = FIRST(')') = ')'
+        FOLLOW(c_in_b) = FIRST('^') = '^'
+
+        Upon erroneous input "[]", the call chain is
+
+        a -> b -> c
+
+        and, hence, the follow context stack is:
+
+        depth  local follow set     after call to rule
+          0          <EOF>                   a (from main())
+          1          ']'                     b
+          2          '^'                     c
+
+        Notice that ')' is not included, because b would have to have
+        been called from a different context in rule a for ')' to be
+        included.
+
+        For error recovery, we cannot consider FOLLOW(c)
+        (context-sensitive or otherwise).  We need the combined set of
+        all context-sensitive FOLLOW sets--the set of all tokens that
+        could follow any reference in the call chain.  We need to
+        resync to one of those tokens.  Note that FOLLOW(c)='^' and if
+        we resync'd to that token, we'd consume until EOF.  We need to
+        sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+        In this case, for input "[]", LA(1) is in this set so we would
+        not consume anything and after printing an error rule c would
+        return normally.  It would not find the required '^' though.
+        At this point, it gets a mismatched token error and throws an
+        exception (since LA(1) is not in the viable following token
+        set).  The rule exception handler tries to recover, but finds
+        the same recovery set and doesn't consume anything.  Rule b
+        exits normally returning to rule a.  Now it finds the ']' (and
+        with the successful match exits errorRecovery mode).
+
+        So, you can see that the parser walks up the call chain looking
+        for the token that was a member of the recovery set.
+
+        Errors are not generated in errorRecovery mode.
+
+        ANTLR's error recovery mechanism is based upon original ideas:
+
+        "Algorithms + Data Structures = Programs" by Niklaus Wirth
+
+        and
+
+        "A note on error recovery in recursive descent parsers":
+        http://portal.acm.org/citation.cfm?id=947902.947905
+
+        Later, Josef Grosch had some good ideas:
+
+        "Efficient and Comfortable Error Recovery in Recursive Descent
+        Parsers":
+        ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+
+        Like Grosch I implemented local FOLLOW sets that are combined
+        at run-time upon error to avoid overhead during parsing.
+        """
+        
+        return self.combineFollows(False)
+
+        
+    def computeContextSensitiveRuleFOLLOW(self):
+        """
+        Compute the context-sensitive FOLLOW set for current rule.
+        This is set of token types that can follow a specific rule
+        reference given a specific call chain.  You get the set of
+        viable tokens that can possibly come next (lookahead depth 1)
+        given the current call chain.  Contrast this with the
+        definition of plain FOLLOW for rule r:
+
+         FOLLOW(r)={x | S=>*alpha r beta in G and x in FIRST(beta)}
+
+        where x in T and alpha, beta in V*; T is the set of terminals and
+        V is the set of terminals and nonterminals.  In other words,
+        FOLLOW(r) is the set of all tokens that can possibly follow
+        references to r in *any* sentential form (context).  At
+        runtime, however, we know precisely which context applies as
+        we have the call chain.  We may compute the exact (rather
+        than covering superset) set of following tokens.
+
+        For example, consider grammar:
+
+        stat : ID '=' expr ';'      // FOLLOW(stat)=={EOF}
+             | "return" expr '.'
+             ;
+        expr : atom ('+' atom)* ;   // FOLLOW(expr)=={';','.',')'}
+        atom : INT                  // FOLLOW(atom)=={'+',')',';','.'}
+             | '(' expr ')'
+             ;
+
+        The FOLLOW sets are all inclusive whereas context-sensitive
+        FOLLOW sets are precisely what could follow a rule reference.
+        For input "i=(3);", here is the derivation:
+
+        stat => ID '=' expr ';'
+             => ID '=' atom ('+' atom)* ';'
+             => ID '=' '(' expr ')' ('+' atom)* ';'
+             => ID '=' '(' atom ')' ('+' atom)* ';'
+             => ID '=' '(' INT ')' ('+' atom)* ';'
+             => ID '=' '(' INT ')' ';'
+
+        At the "3" token, you'd have a call chain of
+
+          stat -> expr -> atom -> expr -> atom
+
+        What can follow that specific nested ref to atom?  Exactly ')'
+        as you can see by looking at the derivation of this specific
+        input.  Contrast this with the FOLLOW(atom)={'+',')',';','.'}.
+
+        You want the exact viable token set when recovering from a
+        token mismatch.  Upon token mismatch, if LA(1) is member of
+        the viable next token set, then you know there is most likely
+        a missing token in the input stream.  "Insert" one by just not
+        throwing an exception.
+        """
+
+        return self.combineFollows(True)
+
+
+    def combineFollows(self, exact):
+        followSet = set()
+        for idx, localFollowSet in reversed(list(enumerate(self._state.following))):
+            followSet |= localFollowSet
+            if exact:
+                # can we see end of rule?
+                if EOR_TOKEN_TYPE in localFollowSet:
+                    # Only leave EOR in set if at top (start rule); this lets
+                    # us know if we have to include follow(start rule); i.e., EOF
+                    if idx > 0:
+                        followSet.remove(EOR_TOKEN_TYPE)
+                        
+                else:
+                    # can't see end of rule, quit
+                    break
+
+        return followSet
+
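+    ## Illustrative sketch (not part of the runtime): self._state.following
+    ## is a stack of per-rule-invocation follow sets.  For the docstring's
+    ## grammar and erroneous input "[]" it would hold something like this
+    ## (RBRACK and CARET are hypothetical token type constants):
+    ##
+    ##     following = [set([EOF]), set([RBRACK]), set([CARET])]
+    ##
+    ## and the non-exact recovery set is simply the union of the stack:
+    ##
+    ##     recovery = set()
+    ##     for localFollowSet in reversed(following):
+    ##         recovery |= localFollowSet
+    ##     # recovery now holds {']', '^', <EOF>}
+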
+
+    def recoverFromMismatchedToken(self, input, ttype, follow):
+        """Attempt to recover from a single missing or extra token.
+
+        EXTRA TOKEN
+
+        LA(1) is not what we are looking for.  If LA(2) has the right token,
+        however, then assume LA(1) is some extra spurious token.  Delete it
+        and consume LA(2) as if we were doing a normal match(), which
+        advances the input.
+
+        MISSING TOKEN
+
+        If the current token is consistent with what could come after
+        ttype, then it is ok to 'insert' the missing token, else throw an
+        exception.  For example, input 'i=(3;' is clearly missing the
+        ')'.  When the parser returns from the nested call to expr, it
+        will have call chain:
+
+          stat -> expr -> atom
+
+        and it will be trying to match the ')' at this point in the
+        derivation:
+
+             => ID '=' '(' INT ')' ('+' atom)* ';'
+                                ^
+        match() will see that ';' doesn't match ')' and report a
+        mismatched token error.  To recover, it sees that LA(1)==';'
+        is in the set of tokens that can follow the ')' token
+        reference in rule atom.  It can assume that you forgot the ')'.
+        """
+
+        e = None
+
+        # if next token is what we are looking for then "delete" this token
+        if self.mismatchIsUnwantedToken(input, ttype):
+            e = UnwantedTokenException(ttype, input)
+
+            self.beginResync()
+            input.consume() # simply delete extra token
+            self.endResync()
+
+            # report after consuming so AW sees the token in the exception
+            self.reportError(e)
+
+            # we want to return the token we're actually matching
+            matchedSymbol = self.getCurrentInputSymbol(input)
+
+            # move past ttype token as if all were ok
+            input.consume()
+            return matchedSymbol
+
+        # can't recover with single token deletion, try insertion
+        if self.mismatchIsMissingToken(input, follow):
+            inserted = self.getMissingSymbol(input, e, ttype, follow)
+            e = MissingTokenException(ttype, input, inserted)
+
+            # report after inserting so AW sees the token in the exception
+            self.reportError(e)
+            return inserted
+
+        # even that didn't work; must throw the exception
+        e = MismatchedTokenException(ttype, input)
+        raise e
+
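+    ## Illustrative sketch (hypothetical names, not generated code): a
+    ## generated rule reaches this method through BaseRecognizer.match().
+    ## On input "i=(3;" the missing ')' is "inserted" by returning a
+    ## conjured token instead of raising:
+    ##
+    ##     def atom(self):
+    ##         # ... after matching '(' expr, try to match the closing ')';
+    ##         # RPAREN and FOLLOW_RPAREN_in_atom are hypothetical constants.
+    ##         self.match(self.input, RPAREN, FOLLOW_RPAREN_in_atom)
+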
+
+    def recoverFromMismatchedSet(self, input, e, follow):
+        """Not currently used"""
+
+        if self.mismatchIsMissingToken(input, follow):
+            self.reportError(e)
+            # we don't know how to conjure up a token for sets yet
+            return self.getMissingSymbol(input, e, INVALID_TOKEN_TYPE, follow)
+
+        # TODO do single token deletion like above for Token mismatch
+        raise e
+
+
+    def getCurrentInputSymbol(self, input):
+        """
+        Match needs to return the current input symbol, which gets put
+        into the label for the associated token ref; e.g., x=ID.  Token
+        and tree parsers need to return different objects. Rather than test
+        for input stream type or change the IntStream interface, I use
+        a simple method to ask the recognizer to tell me what the current
+        input symbol is.
+
+        This is ignored for lexers.
+        """
+        
+        return None
+
+
+    def getMissingSymbol(self, input, e, expectedTokenType, follow):
+        """Conjure up a missing token during error recovery.
+
+        The recognizer attempts to recover from single missing
+        symbols. But, actions might refer to that missing symbol.
+        For example, x=ID {f($x);}. The action clearly assumes
+        that there has been an identifier matched previously and that
+        $x points at that token. If that token is missing, but
+        the next token in the stream is what we want, we assume that
+        this token is missing and we keep going. Because we
+        have to return some token to replace the missing token,
+        we have to conjure one up. This method gives the user control
+        over the tokens returned for missing tokens. Mostly,
+        you will want to create something special for identifier
+        tokens. For literals such as '{' and ',', the default
+        action in the parser or tree parser works. It simply creates
+        a CommonToken of the appropriate type whose text names the missing
+        token.
+        If you change what tokens must be created by the lexer,
+        override this method to create the appropriate tokens.
+        """
+
+        return None
+
+
+##     def recoverFromMissingElement(self, input, e, follow):
+##         """
+##         This code is factored out from mismatched token and mismatched set
+##         recovery.  It handles "single token insertion" error recovery for
+##         both.  No tokens are consumed to recover from insertions.  Return
+##         true if recovery was possible else return false.
+##         """
+        
+##         if self.mismatchIsMissingToken(input, follow):
+##             self.reportError(e)
+##             return True
+
+##         # nothing to do; throw exception
+##         return False
+
+
+    def consumeUntil(self, input, tokenTypes):
+        """
+        Consume tokens until one matches the given token or token set
+
+        tokenTypes can be a single token type or a set of token types
+        
+        """
+        
+        if not isinstance(tokenTypes, (set, frozenset)):
+            tokenTypes = frozenset([tokenTypes])
+
+        ttype = input.LA(1)
+        while ttype != EOF and ttype not in tokenTypes:
+            input.consume()
+            ttype = input.LA(1)
+
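+    ## Illustrative sketch (not part of the runtime): tokenTypes may be a
+    ## single token type or a set; SEMI and RBRACE are hypothetical token
+    ## type constants.
+    ##
+    ##     self.consumeUntil(self.input, SEMI)                 # single type
+    ##     self.consumeUntil(self.input, set([SEMI, RBRACE]))  # set of types
+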
+
+    def getRuleInvocationStack(self):
+        """
+        Return List<String> of the rules in your parser instance
+        leading up to a call to this method.  You could override if
+        you want more details such as the file/line info of where
+        in the parser java code a rule is invoked.
+
+        This is very useful for error messages and for context-sensitive
+        error recovery.
+
+        You must be careful if you subclass a generated recognizer.
+        The default implementation will only search the module of self
+        for rules, but the subclass will not contain any rules.
+        You probably want to override this method to look like
+
+        def getRuleInvocationStack(self):
+            return self._getRuleInvocationStack(<class>.__module__)
+
+        where <class> is the class of the generated recognizer, e.g.
+        the superclass of self.
+        """
+
+        return self._getRuleInvocationStack(self.__module__)
+
+
+    def _getRuleInvocationStack(cls, module):
+        """
+        A more general version of getRuleInvocationStack where you can
+        pass in, for example, a RecognitionException to get its rule
+        stack trace.  This routine is shared with all recognizers, hence,
+        static.
+
+        TODO: move to a utility class or something; weird having lexer call
+        this
+        """
+
+        # mmmhhh,... perhaps look at the first argument
+        # (f_locals[co_varnames[0]]?) and test if it's a (sub)class of
+        # requested recognizer...
+        
+        rules = []
+        for frame in reversed(inspect.stack()):
+            code = frame[0].f_code
+            codeMod = inspect.getmodule(code)
+            if codeMod is None:
+                continue
+
+            # skip frames not in requested module
+            if codeMod.__name__ != module:
+                continue
+
+            # skip some unwanted names
+            if code.co_name in ('nextToken', '<module>'):
+                continue
+
+            rules.append(code.co_name)
+
+        return rules
+        
+    _getRuleInvocationStack = classmethod(_getRuleInvocationStack)
+    
+
+    def getBacktrackingLevel(self):
+        return self._state.backtracking
+
+
+    def getGrammarFileName(self):
+        """For debugging and other purposes, might want the grammar name.
+        
+        Have ANTLR generate an implementation for this method.
+        """
+
+        return self.grammarFileName
+
+
+    def getSourceName(self):
+        raise NotImplementedError
+
+    
+    def toStrings(self, tokens):
+        """A convenience method for use most often with template rewrites.
+
+        Convert a List<Token> to List<String>
+        """
+
+        if tokens is None:
+            return None
+
+        return [token.text for token in tokens]
+
+
+    def getRuleMemoization(self, ruleIndex, ruleStartIndex):
+        """
+        Given a rule number and a start token index number, return
+        MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
+        start index.  If this rule has parsed input starting from the
+        start index before, then return where the rule stopped parsing.
+        It returns the index of the last token matched by the rule.
+        """
+        
+        if ruleIndex not in self._state.ruleMemo:
+            self._state.ruleMemo[ruleIndex] = {}
+
+        return self._state.ruleMemo[ruleIndex].get(
+            ruleStartIndex, self.MEMO_RULE_UNKNOWN
+            )
+
+
+    def alreadyParsedRule(self, input, ruleIndex):
+        """
+        Has this rule already parsed input at the current index in the
+        input stream?  Return the stop token index or MEMO_RULE_UNKNOWN.
+        If we attempted but failed to parse properly before, return
+        MEMO_RULE_FAILED.
+
+        This method has a side-effect: if we have seen this input for
+        this rule and successfully parsed before, then seek ahead to
+        1 past the stop token matched for this rule last time.
+        """
+
+        stopIndex = self.getRuleMemoization(ruleIndex, input.index())
+        if stopIndex == self.MEMO_RULE_UNKNOWN:
+            return False
+
+        if stopIndex == self.MEMO_RULE_FAILED:
+            raise BacktrackingFailed
+
+        else:
+            input.seek(stopIndex + 1)
+
+        return True
+
+
+    def memoize(self, input, ruleIndex, ruleStartIndex, success):
+        """
+        Record whether or not this rule parsed the input at this position
+        successfully.
+        """
+
+        if success:
+            stopTokenIndex = input.index() - 1
+        else:
+            stopTokenIndex = self.MEMO_RULE_FAILED
+        
+        if ruleIndex in self._state.ruleMemo:
+            self._state.ruleMemo[ruleIndex][ruleStartIndex] = stopTokenIndex
+
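+    ## Illustrative sketch (hypothetical names, not generated code): how a
+    ## generated rule wires up the memoization hooks above while backtracking.
+    ## EXPR_INDEX is an assumed rule index; memoize() only records a result
+    ## if self._state.ruleMemo[EXPR_INDEX] was initialized beforehand.
+    ##
+    ##     def expr(self):
+    ##         exprStartIndex = self.input.index()
+    ##         if (self._state.backtracking > 0
+    ##                 and self.alreadyParsedRule(self.input, self.EXPR_INDEX)):
+    ##             return  # memo hit: cursor already seeked past this rule
+    ##         success = True
+    ##         try:
+    ##             pass  # ... match the rule body ...
+    ##         except BacktrackingFailed:
+    ##             success = False
+    ##             raise
+    ##         finally:
+    ##             if self._state.backtracking > 0:
+    ##                 self.memoize(self.input, self.EXPR_INDEX,
+    ##                              exprStartIndex, success)
+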
+
+    def traceIn(self, ruleName, ruleIndex, inputSymbol):
+        sys.stdout.write("enter %s %s" % (ruleName, inputSymbol))
+        
+##         if self._state.failed:
+##             sys.stdout.write(" failed=%s" % self._state.failed)
+
+        if self._state.backtracking > 0:
+            sys.stdout.write(" backtracking=%s" % self._state.backtracking)
+
+        sys.stdout.write('\n')
+
+
+    def traceOut(self, ruleName, ruleIndex, inputSymbol):
+        sys.stdout.write("exit %s %s" % (ruleName, inputSymbol))
+        
+##         if self._state.failed:
+##             sys.stdout.write(" failed=%s" % self._state.failed)
+
+        if self._state.backtracking > 0:
+            sys.stdout.write(" backtracking=%s" % self._state.backtracking)
+
+        sys.stdout.write('\n')
+
+
+
+class TokenSource(object):
+    """
+    @brief Abstract baseclass for token producers.
+    
+    A source of tokens must provide a sequence of tokens via nextToken()
+    and also must reveal its source of characters; CommonToken's text is
+    computed from a CharStream; it only stores indices into the char stream.
+
+    Errors from the lexer are never passed to the parser.  Either you want
+    to keep going or you do not upon token recognition error.  If you do not
+    want to continue lexing then you do not want to continue parsing.  Just
+    throw an exception not under RecognitionException and Python will naturally
+    toss you all the way out of the recognizers.  If you want to continue
+    lexing then you should not throw an exception to the parser--it has already
+    requested a token.  Keep lexing until you get a valid one.  Just report
+    errors and keep going, looking for a valid token.
+    """
+    
+    def nextToken(self):
+        """Return a Token object from your input stream (usually a CharStream).
+        
+        Do not fail/return upon lexing error; keep chewing on the characters
+        until you get a good one; errors are not passed through to the parser.
+        """
+
+        raise NotImplementedError
+    
+
+    def __iter__(self):
+        """The TokenSource is an interator.
+
+        The iteration will not include the final EOF token, see also the note
+        for the next() method.
+
+        """
+        
+        return self
+
+    
+    def next(self):
+        """Return next token or raise StopIteration.
+
+        Note that this will raise StopIteration when hitting the EOF token,
+        so EOF will not be part of the iteration.
+        
+        """
+
+        token = self.nextToken()
+        if token is None or token.type == EOF:
+            raise StopIteration
+        return token
+
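+## Illustrative sketch (not part of the runtime): because TokenSource
+## implements the iterator protocol, any lexer can be drained with a for
+## loop; the final EOF token is not yielded.  SomeLexer is hypothetical and
+## ANTLRStringStream comes from antlr3.streams.
+##
+##     lexer = SomeLexer(ANTLRStringStream(u'input text'))
+##     for token in lexer:
+##         print token.type, repr(token.text)
+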
+    
+class Lexer(BaseRecognizer, TokenSource):
+    """
+    @brief Baseclass for generated lexer classes.
+    
+    A lexer is a recognizer that draws input symbols from a character stream.
+    Lexer grammars result in a subclass of this object. A Lexer object
+    uses simplified match() and error recovery mechanisms in the interest
+    of speed.
+    """
+
+    def __init__(self, input, state=None):
+        BaseRecognizer.__init__(self, state)
+        TokenSource.__init__(self)
+        
+        # Where is the lexer drawing characters from?
+        self.input = input
+
+
+    def reset(self):
+        BaseRecognizer.reset(self) # reset all recognizer state variables
+
+        if self.input is not None:
+            # rewind the input
+            self.input.seek(0)
+
+        if self._state is None:
+            # no shared state work to do
+            return
+        
+        # whack Lexer state variables
+        self._state.token = None
+        self._state.type = INVALID_TOKEN_TYPE
+        self._state.channel = DEFAULT_CHANNEL
+        self._state.tokenStartCharIndex = -1
+        self._state.tokenStartLine = -1
+        self._state.tokenStartCharPositionInLine = -1
+        self._state.text = None
+
+
+    def nextToken(self):
+        """
+        Return a token from this source; i.e., match a token on the char
+        stream.
+        """
+        
+        while 1:
+            self._state.token = None
+            self._state.channel = DEFAULT_CHANNEL
+            self._state.tokenStartCharIndex = self.input.index()
+            self._state.tokenStartCharPositionInLine = self.input.charPositionInLine
+            self._state.tokenStartLine = self.input.line
+            self._state.text = None
+            if self.input.LA(1) == EOF:
+                return EOF_TOKEN
+
+            try:
+                self.mTokens()
+                
+                if self._state.token is None:
+                    self.emit()
+                    
+                elif self._state.token == SKIP_TOKEN:
+                    continue
+
+                return self._state.token
+
+            except NoViableAltException, re:
+                self.reportError(re)
+                self.recover(re) # throw out current char and try again
+
+            except RecognitionException, re:
+                self.reportError(re)
+                # match() routine has already called recover()
+
+
+    def skip(self):
+        """
+        Instruct the lexer to skip creating a token for current lexer rule
+        and look for another token.  nextToken() knows to keep looking when
+        a lexer rule finishes with token set to SKIP_TOKEN.  Recall that
+        if token is None at the end of any token rule, it creates one for you
+        and emits it.
+        """
+        
+        self._state.token = SKIP_TOKEN
+
+
+    def mTokens(self):
+        """This is the lexer entry point that sets instance var 'token'"""
+
+        # abstract method
+        raise NotImplementedError
+    
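+    ## Illustrative sketch (hypothetical, hand-written rather than generated):
+    ## a minimal mTokens() recognizing a single-character token.  PLUS is an
+    ## assumed token type constant, and the NoViableAltException arguments
+    ## (description, decision, state, input) are assumed.
+    ##
+    ##     def mTokens(self):
+    ##         if self.input.LA(1) == ord('+'):
+    ##             self._state.type = PLUS
+    ##             self._state.channel = DEFAULT_CHANNEL
+    ##             self.match('+')
+    ##         else:
+    ##             raise NoViableAltException("", 0, 0, self.input)
+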
+
+    def setCharStream(self, input):
+        """Set the char stream and reset the lexer"""
+        self.input = None
+        self.reset()
+        self.input = input
+
+
+    def getSourceName(self):
+        return self.input.getSourceName()
+
+
+    def emit(self, token=None):
+        """
+        The standard method called to automatically emit a token at the
+        outermost lexical rule.  The token object should point into the
+        char buffer start..stop.  If there is a text override in 'text',
+        use that to set the token's text.  Override this method to emit
+        custom Token objects.
+
+        If you are building trees, then you should also override
+        Parser or TreeParser.getMissingSymbol().
+        """
+
+        if token is None:
+            token = CommonToken(
+                input=self.input,
+                type=self._state.type,
+                channel=self._state.channel,
+                start=self._state.tokenStartCharIndex,
+                stop=self.getCharIndex()-1
+                )
+            token.line = self._state.tokenStartLine
+            token.text = self._state.text
+            token.charPositionInLine = self._state.tokenStartCharPositionInLine
+
+        self._state.token = token
+        
+        return token
+
+
+    def match(self, s):
+        if isinstance(s, basestring):
+            for c in s:
+                if self.input.LA(1) != ord(c):
+                    if self._state.backtracking > 0:
+                        raise BacktrackingFailed
+
+                    mte = MismatchedTokenException(c, self.input)
+                    self.recover(mte)
+                    raise mte
+
+                self.input.consume()
+
+        else:
+            if self.input.LA(1) != s:
+                if self._state.backtracking > 0:
+                    raise BacktrackingFailed
+
+                mte = MismatchedTokenException(unichr(s), self.input)
+                self.recover(mte) # don't really recover; just consume in lexer
+                raise mte
+        
+            self.input.consume()
+            
+
+    def matchAny(self):
+        self.input.consume()
+
+
+    def matchRange(self, a, b):
+        if self.input.LA(1) < a or self.input.LA(1) > b:
+            if self._state.backtracking > 0:
+                raise BacktrackingFailed
+
+            mre = MismatchedRangeException(unichr(a), unichr(b), self.input)
+            self.recover(mre)
+            raise mre
+
+        self.input.consume()
+
+
+    def getLine(self):
+        return self.input.line
+
+
+    def getCharPositionInLine(self):
+        return self.input.charPositionInLine
+
+
+    def getCharIndex(self):
+        """What is the index of the current character of lookahead?"""
+        
+        return self.input.index()
+
+
+    def getText(self):
+        """
+        Return the text matched so far for the current token or any
+        text override.
+        """
+        if self._state.text is not None:
+            return self._state.text
+        
+        return self.input.substring(
+            self._state.tokenStartCharIndex,
+            self.getCharIndex()-1
+            )
+
+
+    def setText(self, text):
+        """
+        Set the complete text of this token; it wipes any previous
+        changes to the text.
+        """
+        self._state.text = text
+
+
+    text = property(getText, setText)
+
+
+    def reportError(self, e):
+        ## TODO: not thought about recovery in lexer yet.
+
+        ## # if we've already reported an error and have not matched a token
+        ## # yet successfully, don't report any errors.
+        ## if self.errorRecovery:
+        ##     #System.err.print("[SPURIOUS] ");
+        ##     return;
+        ## 
+        ## self.errorRecovery = True
+
+        self.displayRecognitionError(self.tokenNames, e)
+
+
+    def getErrorMessage(self, e, tokenNames):
+        msg = None
+        
+        if isinstance(e, MismatchedTokenException):
+            msg = "mismatched character " \
+                  + self.getCharErrorDisplay(e.c) \
+                  + " expecting " \
+                  + self.getCharErrorDisplay(e.expecting)
+
+        elif isinstance(e, NoViableAltException):
+            msg = "no viable alternative at character " \
+                  + self.getCharErrorDisplay(e.c)
+
+        elif isinstance(e, EarlyExitException):
+            msg = "required (...)+ loop did not match anything at character " \
+                  + self.getCharErrorDisplay(e.c)
+            
+        elif isinstance(e, MismatchedNotSetException):
+            msg = "mismatched character " \
+                  + self.getCharErrorDisplay(e.c) \
+                  + " expecting set " \
+                  + repr(e.expecting)
+
+        elif isinstance(e, MismatchedSetException):
+            msg = "mismatched character " \
+                  + self.getCharErrorDisplay(e.c) \
+                  + " expecting set " \
+                  + repr(e.expecting)
+
+        elif isinstance(e, MismatchedRangeException):
+            msg = "mismatched character " \
+                  + self.getCharErrorDisplay(e.c) \
+                  + " expecting set " \
+                  + self.getCharErrorDisplay(e.a) \
+                  + ".." \
+                  + self.getCharErrorDisplay(e.b)
+
+        else:
+            msg = BaseRecognizer.getErrorMessage(self, e, tokenNames)
+
+        return msg
+
+
+    def getCharErrorDisplay(self, c):
+        if c == EOF:
+            c = '<EOF>'
+        return repr(c)
+
+
+    def recover(self, re):
+        """
+        A lexer can normally match any char in its vocabulary after matching
+        a token, so do the easy thing and just kill a character and hope
+        it all works out.  You can instead use the rule invocation stack
+        to do sophisticated error recovery if you are in a fragment rule.
+        """
+
+        self.input.consume()
+
+
+    def traceIn(self, ruleName, ruleIndex):
+        inputSymbol = "%s line=%d:%s" % (self.input.LT(1),
+                                         self.getLine(),
+                                         self.getCharPositionInLine()
+                                         )
+        
+        BaseRecognizer.traceIn(self, ruleName, ruleIndex, inputSymbol)
+
+
+    def traceOut(self, ruleName, ruleIndex):
+        inputSymbol = "%s line=%d:%s" % (self.input.LT(1),
+                                         self.getLine(),
+                                         self.getCharPositionInLine()
+                                         )
+
+        BaseRecognizer.traceOut(self, ruleName, ruleIndex, inputSymbol)
+
+
+
+class Parser(BaseRecognizer):
+    """
+    @brief Baseclass for generated parser classes.
+    """
+    
+    def __init__(self, lexer, state=None):
+        BaseRecognizer.__init__(self, state)
+
+        self.setTokenStream(lexer)
+
+
+    def reset(self):
+        BaseRecognizer.reset(self) # reset all recognizer state variables
+        if self.input is not None:
+            self.input.seek(0) # rewind the input
+
+
+    def getCurrentInputSymbol(self, input):
+        return input.LT(1)
+
+
+    def getMissingSymbol(self, input, e, expectedTokenType, follow):
+        if expectedTokenType == EOF:
+            tokenText = "<missing EOF>"
+        else:
+            tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"
+        t = CommonToken(type=expectedTokenType, text=tokenText)
+        current = input.LT(1)
+        if current.type == EOF:
+            current = input.LT(-1)
+
+        if current is not None:
+            t.line = current.line
+            t.charPositionInLine = current.charPositionInLine
+        t.channel = DEFAULT_CHANNEL
+        return t
+
+
+    def setTokenStream(self, input):
+        """Set the token stream and reset the parser"""
+        
+        self.input = None
+        self.reset()
+        self.input = input
+
+
+    def getTokenStream(self):
+        return self.input
+
+
+    def getSourceName(self):
+        return self.input.getSourceName()
+
+
+    def traceIn(self, ruleName, ruleIndex):
+        BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))
+
+
+    def traceOut(self, ruleName, ruleIndex):
+        BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
+
+
+class RuleReturnScope(object):
+    """
+    Rules can return start/stop info as well as possible trees and templates.
+    """
+
+    def getStart(self):
+        """Return the start token or tree."""
+        return None
+    
+
+    def getStop(self):
+        """Return the stop token or tree."""
+        return None
+
+    
+    def getTree(self):
+        """Has a value potentially if output=AST."""
+        return None
+
+
+    def getTemplate(self):
+        """Has a value potentially if output=template."""
+        return None
+
+
+class ParserRuleReturnScope(RuleReturnScope):
+    """
+    Rules that return more than a single value must return an object
+    containing all the values.  Besides the properties defined in
+    RuleLabelScope.predefinedRulePropertiesScope there may be user-defined
+    return values.  This class simply defines the minimum properties that
+    are always defined and methods to access the others that might be
+    available depending on output option such as template and tree.
+
+    Note that text is not an actual property of the return value; it is
+    computed from start and stop using the input stream's toString() method.  I
+    could add a ctor to this so that we can pass in and store the input
+    stream, but I'm not sure we want to do that.  It would seem to be undefined
+    to get the .text property anyway if the rule matches tokens from multiple
+    input streams.
+
+    I do not use getters for fields of objects that are used simply to
+    group values such as this aggregate.  The getters/setters are there to
+    satisfy the superclass interface.
+    """
+
+    def __init__(self):
+        self.start = None
+        self.stop = None
+
+    
+    def getStart(self):
+        return self.start
+
+
+    def getStop(self):
+        return self.stop
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/streams.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,1452 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+import codecs
+from StringIO import StringIO
+
+from antlr3.constants import DEFAULT_CHANNEL, EOF
+from antlr3.tokens import Token, EOF_TOKEN
+
+
+############################################################################
+#
+# basic interfaces
+#   IntStream
+#    +- CharStream
+#    \- TokenStream
+#
+# subclasses must implemented all methods
+#
+############################################################################
+
+class IntStream(object):
+    """
+    @brief Base interface for streams of integer values.
+
+    A simple stream of integers used when all I care about is the char
+    or token type sequence (such as interpretation).
+    """
+
+    def consume(self):
+        raise NotImplementedError
+    
+
+    def LA(self, i):
+        """Get int at current input pointer + i ahead where i=1 is next int.
+
+        Negative indexes are allowed.  LA(-1) is previous token (token
+        just matched).  LA(-i) where i is before first token should
+        yield -1, invalid char / EOF.
+        """
+        
+        raise NotImplementedError
+        
+
+    def mark(self):
+        """
+        Tell the stream to start buffering if it hasn't already.  Return
+        current input position, index(), or some other marker so that
+        when passed to rewind() you get back to the same spot.
+        rewind(mark()) should not affect the input cursor.  The Lexer
+        track line/col info as well as input index so its markers are
+        not pure input indexes.  Same for tree node streams.
+        """
+
+        raise NotImplementedError
+
+
+    def index(self):
+        """
+        Return the current input symbol index 0..n where n indicates the
+        last symbol has been read.  The index is the symbol about to be
+        read not the most recently read symbol.
+        """
+
+        raise NotImplementedError
+
+
+    def rewind(self, marker=None):
+        """
+        Reset the stream so that next call to index would return marker.
+        The marker will usually be index() but it doesn't have to be.  It's
+        just a marker to indicate what state the stream was in.  This is
+        essentially calling release() and seek().  If there are markers
+        created after this marker argument, this routine must unroll them
+        like a stack.  Assume the state the stream was in when this marker
+        was created.
+
+        If marker is None:
+        Rewind to the input position of the last marker.
+        Used currently only after a cyclic DFA and just
+        before starting a sem/syn predicate to get the
+        input position back to the start of the decision.
+        Do not "pop" the marker off the state.  mark(i)
+        and rewind(i) should balance still. It is
+        like invoking rewind(last marker) but it should not "pop"
+        the marker off.  It's like seek(last marker's input position).
+        """
+
+        raise NotImplementedError
+
+
+    def release(self, marker=None):
+        """
+        You may want to commit to a backtrack but don't want to force the
+        stream to keep bookkeeping objects around for a marker that is
+        no longer necessary.  This will have the same behavior as
+        rewind() except it releases resources without the backward seek.
+        This must throw away resources for all markers back to the marker
+        argument.  So if you're nested 5 levels of mark(), and then release(2)
+        you have to release resources for depths 2..5.
+        """
+
+        raise NotImplementedError
+
+
+    def seek(self, index):
+        """
+        Set the input cursor to the position indicated by index.  This is
+        normally used to seek ahead in the input stream.  No buffering is
+        required to do this unless you know your stream will use seek to
+        move backwards such as when backtracking.
+
+        This is different from rewind in its multi-directional
+        requirement and in that its argument is strictly an input cursor
+        (index).
+
+        For char streams, seeking forward must update the stream state such
+        as line number.  For seeking backwards, you will be presumably
+        backtracking using the mark/rewind mechanism that restores state and
+        so this method does not need to update state when seeking backwards.
+
+        Currently, this method is only used for efficient backtracking using
+        memoization, but in the future it may be used for incremental parsing.
+
+        The index is 0..n-1.  A seek to position i means that LA(1) will
+        return the ith symbol.  So, seeking to 0 means LA(1) will return the
+        first element in the stream. 
+        """
+
+        raise NotImplementedError
+
+
+    def size(self):
+        """
+        Only makes sense for streams that buffer everything up probably, but
+        might be useful to display the entire stream or for testing.  This
+        value includes a single EOF.
+        """
+
+        raise NotImplementedError
+
+
+    def getSourceName(self):
+        """
+        Where are you getting symbols from?  Normally, implementations will
+        pass the buck all the way to the lexer who can ask its input stream
+        for the file name or whatever.
+        """
+
+        raise NotImplementedError
+
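+## Illustrative sketch (not part of the runtime): the mark/rewind protocol
+## as a backtracking decision would drive it on any concrete IntStream;
+## `stream` stands in for any implementation below.
+##
+##     marker = stream.mark()     # start buffering, remember position
+##     try:
+##         pass                   # speculatively consume input here
+##     finally:
+##         stream.rewind(marker)  # restore the position recorded by mark()
+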
+
+class CharStream(IntStream):
+    """
+    @brief A source of characters for an ANTLR lexer.
+
+    This is an abstract class that must be implemented by a subclass.
+    
+    """
+
+    # pylint does not realize that this is an interface, too
+    #pylint: disable-msg=W0223
+    
+    EOF = -1
+
+
+    def substring(self, start, stop):
+        """
+        For infinite streams, you don't need this; primarily I'm providing
+        a useful interface for action code.  Just make sure actions don't
+        use this on streams that don't support it.
+        """
+
+        raise NotImplementedError
+        
+    
+    def LT(self, i):
+        """
+        Get the ith character of lookahead.  This is usually the same as
+        LA(i).  This will be used for labels in the generated
+        lexer code.  I'd prefer to return a char here type-wise, but it's
+        probably better to be 32-bit clean and be consistent with LA.
+        """
+
+        raise NotImplementedError
+
+
+    def getLine(self):
+        """ANTLR tracks the line information automatically"""
+
+        raise NotImplementedError
+
+
+    def setLine(self, line):
+        """
+        Because this stream can rewind, we need to be able to reset the line
+        """
+
+        raise NotImplementedError
+
+
+    def getCharPositionInLine(self):
+        """
+        The index of the character relative to the beginning of the line 0..n-1
+        """
+
+        raise NotImplementedError
+
+
+    def setCharPositionInLine(self, pos):
+        raise NotImplementedError
+
+
+class TokenStream(IntStream):
+    """
+
+    @brief A stream of tokens accessing tokens from a TokenSource
+
+    This is an abstract class that must be implemented by a subclass.
+    
+    """
+    
+    # pylint does not realize that this is an interface, too
+    #pylint: disable-msg=W0223
+    
+    def LT(self, k):
+        """
+        Get the Token at current input pointer + k ahead, where k=1 is the
+        next Token.  k<0 indicates tokens in the past.  So -1 is the
+        previous token and -2 is two tokens ago.  LT(0) is undefined.
+        For k>=n, return the EOF Token.  Return None for LT(0) and any
+        index that results in an absolute address that is negative.
+        """
+
+        raise NotImplementedError
+
+
+    def get(self, i):
+        """
+        Get a token at an absolute index i; 0..n-1.  This is really only
+        needed for profiling and debugging and token stream rewriting.
+        If you don't want to buffer up tokens, then this method makes no
+        sense for you.  Naturally you can't use the rewrite stream feature.
+        I believe DebugTokenStream can easily be altered to not use
+        this method, removing the dependency.
+        """
+
+        raise NotImplementedError
+
+
+    def getTokenSource(self):
+        """
+        Where is this stream pulling tokens from?  This is not the name, but
+        the object that provides Token objects.
+        """
+
+        raise NotImplementedError
+
+
+    def toString(self, start=None, stop=None):
+        """
+        Return the text of all tokens from start to stop, inclusive.
+        If the stream does not buffer all the tokens then it can just
+        return "" or null;  Users should not access $ruleLabel.text in
+        an action of course in that case.
+
+        Because the user is not required to use a token with an index stored
+        in it, we must provide a means for two token objects themselves to
+        indicate the start/end location.  Most often this will just delegate
+        to the other toString(int,int).  This is also parallel with
+        the TreeNodeStream.toString(Object,Object).
+        """
+
+        raise NotImplementedError
+
+        
+############################################################################
+#
+# character streams for use in lexers
+#   CharStream
+#   \- ANTLRStringStream
+#
+############################################################################
+
+
+class ANTLRStringStream(CharStream):
+    """
+    @brief CharStream that pulls data from a unicode string.
+    
+    A pretty quick CharStream that pulls all data from an array
+    directly.  Every method call counts in the lexer.
+
+    """
+
+    
+    def __init__(self, data):
+        """
+        @param data This should be a unicode string holding the data you want
+           to parse. If you pass in a byte string, the Lexer will choke on
+           non-ascii data.
+           
+        """
+        
+        CharStream.__init__(self)
+        
+        # The data being scanned
+        self.strdata = unicode(data)
+        self.data = [ord(c) for c in self.strdata]
+
+        # How many characters are actually in the buffer
+        self.n = len(data)
+
+        # 0..n-1 index into string of next char
+        self.p = 0
+
+        # line number 1..n within the input
+        self.line = 1
+
+        # The index of the character relative to the beginning of the
+        # line 0..n-1
+        self.charPositionInLine = 0
+
+        # A list of CharStreamState objects that tracks the stream state
+        # values line, charPositionInLine, and p that can change as you
+        # move through the input stream.  Indexed from 0..markDepth-1.
+        self._markers = [ ]
+        self.lastMarker = None
+        self.markDepth = 0
+
+        # What is name or source of this char stream?
+        self.name = None
+
+
+    def reset(self):
+        """
+        Reset the stream so that it's in the same state it was
+        when the object was created *except* the data array is not
+        touched.
+        """
+        
+        self.p = 0
+        self.line = 1
+        self.charPositionInLine = 0
+        self._markers = [ ]
+
+
+    def consume(self):
+        try:
+            if self.data[self.p] == 10: # \n
+                self.line += 1
+                self.charPositionInLine = 0
+            else:
+                self.charPositionInLine += 1
+
+            self.p += 1
+            
+        except IndexError:
+            # happens when we reach EOF and self.data[self.p] fails
+            # just do nothing
+            pass
+
+
+
+    def LA(self, i):
+        if i == 0:
+            return 0 # undefined
+
+        if i < 0:
+            i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
+
+        try:
+            return self.data[self.p+i-1]
+        except IndexError:
+            return EOF
+
+
+
+    def LT(self, i):
+        if i == 0:
+            return 0 # undefined
+
+        if i < 0:
+            i += 1 # e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
+
+        try:
+            return self.strdata[self.p+i-1]
+        except IndexError:
+            return EOF
+
+
+    def index(self):
+        """
+        Return the current input symbol index 0..n where n indicates the
+        last symbol has been read.  The index is the index of char to
+        be returned from LA(1).
+        """
+        
+        return self.p
+
+
+    def size(self):
+        return self.n
+
+
+    def mark(self):
+        state = (self.p, self.line, self.charPositionInLine)
+        try:
+            self._markers[self.markDepth] = state
+        except IndexError:
+            self._markers.append(state)
+        self.markDepth += 1
+        
+        self.lastMarker = self.markDepth
+        
+        return self.lastMarker
+
+
+    def rewind(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+
+        p, line, charPositionInLine = self._markers[marker-1]
+
+        self.seek(p)
+        self.line = line
+        self.charPositionInLine = charPositionInLine
+        self.release(marker)
+
+
+    def release(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+
+        self.markDepth = marker-1
+
+
+    def seek(self, index):
+        """
+        consume() ahead until p==index; can't just set p=index as we must
+        update line and charPositionInLine.
+        """
+        
+        if index <= self.p:
+            self.p = index # just jump; don't update stream state (line, ...)
+            return
+
+        # seek forward, consume until p hits index
+        while self.p < index:
+            self.consume()
+
+
+    def substring(self, start, stop):
+        return self.strdata[start:stop+1]
+
+
+    def getLine(self):
+        """Using setter/getter methods is deprecated. Use o.line instead."""
+        return self.line
+
+
+    def getCharPositionInLine(self):
+        """
+        Using setter/getter methods is deprecated. Use o.charPositionInLine
+        instead.
+        """
+        return self.charPositionInLine
+
+
+    def setLine(self, line):
+        """Using setter/getter methods is deprecated. Use o.line instead."""
+        self.line = line
+
+
+    def setCharPositionInLine(self, pos):
+        """
+        Using setter/getter methods is deprecated. Use o.charPositionInLine
+        instead.
+        """
+        self.charPositionInLine = pos
+
+
+    def getSourceName(self):
+        return self.name
+
+
+class ANTLRFileStream(ANTLRStringStream):
+    """
+    @brief CharStream that opens a file to read the data.
+    
+    This is a char buffer stream that is loaded from a file
+    all at once when you construct the object.
+    """
+
+    def __init__(self, fileName, encoding=None):
+        """
+        @param fileName The path to the file to be opened. The file will be
+           opened with mode 'rb'.
+
+        @param encoding If you set the optional encoding argument, then the
+           data will be decoded on the fly.
+           
+        """
+        
+        self.fileName = fileName
+
+        fp = codecs.open(fileName, 'rb', encoding)
+        try:
+            data = fp.read()
+        finally:
+            fp.close()
+            
+        ANTLRStringStream.__init__(self, data)
+
+
+    def getSourceName(self):
+        """Deprecated, access o.fileName directly."""
+        
+        return self.fileName
+
+
+class ANTLRInputStream(ANTLRStringStream):
+    """
+    @brief CharStream that reads data from a file-like object.
+
+    This is a char buffer stream that is loaded from a file like object
+    all at once when you construct the object.
+    
+    All input is consumed from the file, but it is not closed.
+    """
+
+    def __init__(self, file, encoding=None):
+        """
+        @param file A file-like object holding your input. Only the read()
+           method must be implemented.
+
+        @param encoding If you set the optional encoding argument, then the
+           data will be decoded on the fly.
+           
+        """
+        
+        if encoding is not None:
+            # wrap input in a decoding reader
+            reader = codecs.lookup(encoding)[2]
+            file = reader(file)
+
+        data = file.read()
+            
+        ANTLRStringStream.__init__(self, data)
+
+
+# I guess the ANTLR prefix exists only to avoid a name clash with some Java
+# mumbojumbo. A plain "StringStream" looks better and should be the
+# preferred name in Python.
+StringStream = ANTLRStringStream
+FileStream = ANTLRFileStream
+InputStream = ANTLRInputStream
+
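+## Illustrative sketch (not part of the runtime): the usual pipeline built
+## on these streams.  GrocLexer/GrocParser (from this changeset's cron
+## package) stand in for any generated recognizer pair, and the input
+## string is hypothetical.
+##
+##     stream = ANTLRStringStream(u'every monday 09:00')
+##     lexer = GrocLexer(stream)
+##     tokens = CommonTokenStream(lexer)
+##     parser = GrocParser(tokens)
+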
+
+############################################################################
+#
+# Token streams
+#   TokenStream
+#   +- CommonTokenStream
+#   \- TokenRewriteStream
+#
+############################################################################
+
+
+class CommonTokenStream(TokenStream):
+    """
+    @brief The most common stream of tokens
+    
+    The most common stream of tokens is one where every token is buffered up
+    and tokens are prefiltered for a certain channel (the parser will only
+    see these tokens and cannot change the filter channel number during the
+    parse).
+    """
+
+    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
+        """
+        @param tokenSource A TokenSource instance (usually a Lexer) to pull
+            the tokens from.
+
+        @param channel Skip tokens on any channel but this one; this is how we
+            skip whitespace...
+            
+        """
+        
+        TokenStream.__init__(self)
+        
+        self.tokenSource = tokenSource
+
+        # Record every single token pulled from the source so we can
+        # reproduce chunks of it later.
+        self.tokens = []
+
+        # Map<tokentype, channel> to override some Tokens' channel numbers
+        self.channelOverrideMap = {}
+
+        # Set<tokentype>; discard any tokens with this type
+        self.discardSet = set()
+
+        # Skip tokens on any channel but this one; this is how we skip
+        # whitespace...
+        self.channel = channel
+
+        # By default, track all incoming tokens
+        self.discardOffChannelTokens = False
+
+        # The index into the tokens list of the current token (next token
+        # to consume).  p==-1 indicates that the tokens list is empty
+        self.p = -1
+
+        # Remember last marked position
+        self.lastMarker = None
+        
+
+    def setTokenSource(self, tokenSource):
+        """Reset this token stream by setting its token source."""
+        
+        self.tokenSource = tokenSource
+        self.tokens = []
+        self.p = -1
+        self.channel = DEFAULT_CHANNEL
+
+
+    def reset(self):
+        self.p = 0
+        self.lastMarker = None
+
+
+    def fillBuffer(self):
+        """
+        Load all tokens from the token source and put them in self.tokens.
+        This is done upon the first LT request because you might want to
+        set some token type / channel overrides before filling the buffer.
+        """
+
+        index = 0
+        t = self.tokenSource.nextToken()
+        while t is not None and t.type != EOF:
+            discard = False
+            
+            if self.discardSet is not None and t.type in self.discardSet:
+                discard = True
+
+            elif self.discardOffChannelTokens and t.channel != self.channel:
+                discard = True
+
+            # is there a channel override for token type?
+            try:
+                overrideChannel = self.channelOverrideMap[t.type]
+                
+            except KeyError:
+                # no override for this type
+                pass
+            
+            else:
+                if overrideChannel == self.channel:
+                    t.channel = overrideChannel
+                else:
+                    discard = True
+            
+            if not discard:
+                t.index = index
+                self.tokens.append(t)
+                index += 1
+
+            t = self.tokenSource.nextToken()
+       
+        # leave p pointing at first token on channel
+        self.p = 0
+        self.p = self.skipOffTokenChannels(self.p)
+
+
+    def consume(self):
+        """
+        Move the input pointer to the next incoming token.  The stream
+        must become active with LT(1) available.  consume() simply
+        moves the input pointer so that LT(1) points at the next
+        input symbol. Consume at least one token.
+
+        Walk past any token not on the channel the parser is listening to.
+        """
+        
+        if self.p < len(self.tokens):
+            self.p += 1
+
+            self.p = self.skipOffTokenChannels(self.p) # leave p on valid token
+
+
+    def skipOffTokenChannels(self, i):
+        """
+        Given a starting index, return the index of the first on-channel
+        token.
+        """
+
+        try:
+            while self.tokens[i].channel != self.channel:
+                i += 1
+        except IndexError:
+            # hit the end of token stream
+            pass
+        
+        return i
+
+
+    def skipOffTokenChannelsReverse(self, i):
+        while i >= 0 and self.tokens[i].channel != self.channel:
+            i -= 1
+
+        return i
+
+
+    def setTokenTypeChannel(self, ttype, channel):
+        """
+        A simple filter mechanism whereby you can tell this token stream
+        to force all tokens of type ttype to be on channel.  For example,
+        when interpreting, we cannot exec actions so we need to tell
+        the stream to force all WS and NEWLINE to be a different, ignored
+        channel.
+        """
+        
+        self.channelOverrideMap[ttype] = channel
+
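+    ## Illustrative sketch (not part of the runtime): route all WS tokens off
+    ## the parser's channel before the buffer is filled; WS and HIDDEN_CHANNEL
+    ## are hypothetical constants.  With this implementation such tokens are
+    ## simply filtered out of the buffer.
+    ##
+    ##     tokens = CommonTokenStream(lexer)
+    ##     tokens.setTokenTypeChannel(WS, HIDDEN_CHANNEL)  # before first LT()
+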
+
+    def discardTokenType(self, ttype):
+        self.discardSet.add(ttype)
+
+
+    def getTokens(self, start=None, stop=None, types=None):
+        """
+        Given a start and stop index, return a list of all tokens in
+        the token type set.  Return None if no tokens were found.  This
+        method looks at both on and off channel tokens.
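+
+        For example (a sketch; ID and INT stand for token type constants
+        from a generated lexer):
+
+            tokens.getTokens(0, 10, set([ID, INT]))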
+        """
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        if stop is None or stop >= len(self.tokens):
+            stop = len(self.tokens) - 1
+            
+        if start is None or start < 0:
+            start = 0
+
+        if start > stop:
+            return None
+
+        if isinstance(types, (int, long)):
+            # called with a single type, wrap into set
+            types = set([types])
+            
+        # note: stop is treated as an inclusive index, hence the +1
+        filteredTokens = [
+            token for token in self.tokens[start:stop+1]
+            if types is None or token.type in types
+            ]
+
+        if len(filteredTokens) == 0:
+            return None
+
+        return filteredTokens
+
+
+    def LT(self, k):
+        """
+        Get the kth token of lookahead from the current position, where
+        k=1 is the first symbol of lookahead (k=-1 is the last on-channel
+        token consumed).
+        """
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        if k == 0:
+            return None
+
+        if k < 0:
+            return self.LB(-k)
+                
+        i = self.p
+        n = 1
+        # find k good tokens
+        while n < k:
+            # skip off-channel tokens
+            i = self.skipOffTokenChannels(i+1) # leave p on valid token
+            n += 1
+
+        try:
+            return self.tokens[i]
+        except IndexError:
+            return EOF_TOKEN
+
+
+    def LB(self, k):
+        """Look backwards k tokens on-channel tokens"""
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        if k == 0:
+            return None
+
+        if self.p - k < 0:
+            return None
+
+        i = self.p
+        n = 1
+        # find k good tokens looking backwards
+        while n <= k:
+            # skip off-channel tokens
+            i = self.skipOffTokenChannelsReverse(i-1) # leave p on valid token
+            n += 1
+
+        if i < 0:
+            return None
+            
+        return self.tokens[i]
+
+
+    def get(self, i):
+        """
+        Return absolute token i; ignore which channel the tokens are on;
+        that is, count all tokens not just on-channel tokens.
+        """
+
+        return self.tokens[i]
+
+
+    def LA(self, i):
+        return self.LT(i).type
+
+
+    def mark(self):
+        self.lastMarker = self.index()
+        return self.lastMarker
+    
+
+    def release(self, marker=None):
+        # no resources to release
+        pass
+    
+
+    def size(self):
+        return len(self.tokens)
+
+
+    def index(self):
+        return self.p
+
+
+    def rewind(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+            
+        self.seek(marker)
+
+
+    def seek(self, index):
+        self.p = index
+
+
+    def getTokenSource(self):
+        return self.tokenSource
+
+
+    def getSourceName(self):
+        return self.tokenSource.getSourceName()
+
+
+    def toString(self, start=None, stop=None):
+        if self.p == -1:
+            self.fillBuffer()
+
+        if start is None:
+            start = 0
+        elif not isinstance(start, int):
+            start = start.index
+
+        if stop is None:
+            stop = len(self.tokens) - 1
+        elif not isinstance(stop, int):
+            stop = stop.index
+        
+        if stop >= len(self.tokens):
+            stop = len(self.tokens) - 1
+
+        return ''.join([t.text for t in self.tokens[start:stop+1]])
+
+
+class RewriteOperation(object):
+    """@brief Internal helper class."""
+    
+    def __init__(self, stream, index, text):
+        self.stream = stream
+        self.index = index
+        self.text = text
+
+    def execute(self, buf):
+        """Execute the rewrite operation by possibly adding to the buffer.
+        Return the index of the next token to operate on.
+        """
+
+        return self.index
+
+    def toString(self):
+        opName = self.__class__.__name__
+        return '<%s@%d:"%s">' % (opName, self.index, self.text)
+
+    __str__ = toString
+    __repr__ = toString
+
+
+class InsertBeforeOp(RewriteOperation):
+    """@brief Internal helper class."""
+
+    def execute(self, buf):
+        buf.write(self.text)
+        buf.write(self.stream.tokens[self.index].text)
+        return self.index + 1
+
+
+class ReplaceOp(RewriteOperation):
+    """
+    @brief Internal helper class.
+    
+    I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+    instructions.
+    """
+
+    def __init__(self, stream, first, last, text):
+        RewriteOperation.__init__(self, stream, first, text)
+        self.lastIndex = last
+
+
+    def execute(self, buf):
+        if self.text is not None:
+            buf.write(self.text)
+
+        return self.lastIndex + 1
+
+
+    def toString(self):
+        return '<ReplaceOp@%d..%d:"%s">' % (
+            self.index, self.lastIndex, self.text)
+
+    __str__ = toString
+    __repr__ = toString
+
+
+class DeleteOp(ReplaceOp):
+    """
+    @brief Internal helper class.
+    """
+
+    def __init__(self, stream, first, last):
+        ReplaceOp.__init__(self, stream, first, last, None)
+
+
+    def toString(self):
+        return '<DeleteOp@%d..%d>' % (self.index, self.lastIndex)
+
+    __str__ = toString
+    __repr__ = toString
+
+
+class TokenRewriteStream(CommonTokenStream):
+    """@brief CommonTokenStream that can be modified.
+
+    Useful for dumping out the input stream after doing some
+    augmentation or other manipulations.
+
+    You can insert stuff, replace, and delete chunks.  Note that the
+    operations are done lazily--only if you convert the buffer to a
+    String.  This is very efficient because you are not moving data around
+    all the time.  As the buffer of tokens is converted to strings, the
+    toString() method(s) check to see if there is an operation at the
+    current index.  If so, the operation is done and then normal String
+    rendering continues on the buffer.  This is like having multiple Turing
+    machine instruction streams (programs) operating on a single input tape. :)
+
+    Since the operations are done lazily at toString-time, operations do not
+    screw up the token index values.  That is, an insert operation at token
+    index i does not change the index values for tokens i+1..n-1.
+
+    Because operations never actually alter the buffer, you may always get
+    the original token stream back without undoing anything.  Since
+    the instructions are queued up, you can easily simulate transactions and
+    roll back any changes if there is an error just by removing instructions.
+    For example,
+
+     CharStream input = new ANTLRFileStream("input");
+     TLexer lex = new TLexer(input);
+     TokenRewriteStream tokens = new TokenRewriteStream(lex);
+     T parser = new T(tokens);
+     parser.startRule();
+
+     Then in the rules, you can execute
+        Token t,u;
+        ...
+        input.insertAfter(t, "text to put after t");
+        input.insertAfter(u, "text after u");
+        System.out.println(tokens.toString());
+
+    Actually, you have to cast the 'input' to a TokenRewriteStream. :(
+
+    You can also have multiple "instruction streams" and get multiple
+    rewrites from a single pass over the input.  Just name the instruction
+    streams and use that name again when printing the buffer.  This could be
+    useful for generating a C file and also its header file--all from the
+    same buffer:
+
+        tokens.insertAfter("pass1", t, "text to put after t");}
+        tokens.insertAfter("pass2", u, "text after u");}
+        System.out.println(tokens.toString("pass1"));
+        System.out.println(tokens.toString("pass2"));
+
+    If you don't use named rewrite streams, a "default" stream is used as
+    the first example shows.
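+
+    A rough Python equivalent of the Java example above (a sketch; TLexer
+    and TParser stand for the lexer and parser that ANTLR generated from
+    some grammar T):
+
+        import antlr3
+
+        lex = TLexer(antlr3.ANTLRFileStream("input"))
+        tokens = TokenRewriteStream(lex)
+        parser = TParser(tokens)
+        parser.startRule()
+
+        # t and u are Token objects captured in the rules
+        tokens.insertAfter(t, "text to put after t")
+        tokens.insertAfter(u, "text after u")
+        print tokens.toString()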
+    """
+    
+    DEFAULT_PROGRAM_NAME = "default"
+    MIN_TOKEN_INDEX = 0
+
+    def __init__(self, tokenSource=None, channel=DEFAULT_CHANNEL):
+        CommonTokenStream.__init__(self, tokenSource, channel)
+
+        # You may have multiple, named streams of rewrite operations.
+        # I'm calling these things "programs."
+        #  Maps String (name) -> rewrite (List)
+        self.programs = {}
+        self.programs[self.DEFAULT_PROGRAM_NAME] = []
+        
+        # Map String (program name) -> Integer index
+        self.lastRewriteTokenIndexes = {}
+        
+
+    def rollback(self, *args):
+        """
+        Rollback the instruction stream for a program so that
+        the indicated instruction (via instructionIndex) is no
+        longer in the stream.  UNTESTED!
+        """
+
+        if len(args) == 2:
+            programName = args[0]
+            instructionIndex = args[1]
+        elif len(args) == 1:
+            programName = self.DEFAULT_PROGRAM_NAME
+            instructionIndex = args[0]
+        else:
+            raise TypeError("Invalid arguments")
+        
+        p = self.programs.get(programName, None)
+        if p is not None:
+            self.programs[programName] = (
+                p[self.MIN_TOKEN_INDEX:instructionIndex])
+
+
+    def deleteProgram(self, programName=DEFAULT_PROGRAM_NAME):
+        """Reset the program so that no instructions exist"""
+            
+        self.rollback(programName, self.MIN_TOKEN_INDEX)
+
+
+    def insertAfter(self, *args):
+        if len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            index = args[0]
+            text = args[1]
+            
+        elif len(args) == 3:
+            programName = args[0]
+            index = args[1]
+            text = args[2]
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        if isinstance(index, Token):
+            # index is a Token, grab the stream index from it
+            index = index.index
+
+        # to insert after, just insert before next index (even if past end)
+        self.insertBefore(programName, index+1, text)
+
+
+    def insertBefore(self, *args):
+        if len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            index = args[0]
+            text = args[1]
+            
+        elif len(args) == 3:
+            programName = args[0]
+            index = args[1]
+            text = args[2]
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        if isinstance(index, Token):
+            # index is a Token, grab the stream index from it
+            index = index.index
+
+        op = InsertBeforeOp(self, index, text)
+        rewrites = self.getProgram(programName)
+        rewrites.append(op)
+
+
+    def replace(self, *args):
+        if len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            first = args[0]
+            last = args[0]
+            text = args[1]
+            
+        elif len(args) == 3:
+            programName = self.DEFAULT_PROGRAM_NAME
+            first = args[0]
+            last = args[1]
+            text = args[2]
+            
+        elif len(args) == 4:
+            programName = args[0]
+            first = args[1]
+            last = args[2]
+            text = args[3]
+
+        else:
+            raise TypeError("Invalid arguments")
+
+        if isinstance(first, Token):
+            # first is a Token, grab the stream index from it
+            first = first.index
+
+        if isinstance(last, Token):
+            # last is a Token, grab the stream index from it
+            last = last.index
+
+        if first > last or first < 0 or last < 0 or last >= len(self.tokens):
+            raise ValueError(
+                "replace: range invalid: %d..%d (size=%d)"
+                % (first, last, len(self.tokens)))
+
+        op = ReplaceOp(self, first, last, text)
+        rewrites = self.getProgram(programName)
+        rewrites.append(op)
+        
+
+    def delete(self, *args):
+        self.replace(*(list(args) + [None]))
+
+
+    def getLastRewriteTokenIndex(self, programName=DEFAULT_PROGRAM_NAME):
+        return self.lastRewriteTokenIndexes.get(programName, -1)
+
+
+    def setLastRewriteTokenIndex(self, programName, i):
+        self.lastRewriteTokenIndexes[programName] = i
+
+
+    def getProgram(self, name):
+        p = self.programs.get(name, None)
+        if p is None:
+            p = self.initializeProgram(name)
+
+        return p
+
+
+    def initializeProgram(self, name):
+        p = []
+        self.programs[name] = p
+        return p
+
+
+    def toOriginalString(self, start=None, end=None):
+        if start is None:
+            start = self.MIN_TOKEN_INDEX
+        if end is None:
+            end = self.size() - 1
+        
+        buf = StringIO()
+        i = start
+        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
+            buf.write(self.get(i).text)
+            i += 1
+
+        return buf.getvalue()
+
+
+    def toString(self, *args):
+        if len(args) == 0:
+            programName = self.DEFAULT_PROGRAM_NAME
+            start = self.MIN_TOKEN_INDEX
+            end = self.size() - 1
+            
+        elif len(args) == 1:
+            programName = args[0]
+            start = self.MIN_TOKEN_INDEX
+            end = self.size() - 1
+
+        elif len(args) == 2:
+            programName = self.DEFAULT_PROGRAM_NAME
+            start = args[0]
+            end = args[1]
+
+        else:
+            raise TypeError("Invalid arguments")
+            
+        if start is None:
+            start = self.MIN_TOKEN_INDEX
+        elif not isinstance(start, int):
+            start = start.index
+
+        if end is None:
+            end = len(self.tokens) - 1
+        elif not isinstance(end, int):
+            end = end.index
+
+        # ensure start/end are in range
+        if end >= len(self.tokens):
+            end = len(self.tokens) - 1
+
+        if start < 0:
+            start = 0
+
+        rewrites = self.programs.get(programName)
+        if rewrites is None or len(rewrites) == 0:
+            # no instructions to execute
+            return self.toOriginalString(start, end)
+        
+        buf = StringIO()
+
+        # First, optimize instruction stream
+        indexToOp = self.reduceToSingleOperationPerIndex(rewrites)
+
+        # Walk buffer, executing instructions and emitting tokens
+        i = start
+        while i <= end and i < len(self.tokens):
+            op = indexToOp.get(i)
+            # remove so any left have index size-1
+            try:
+                del indexToOp[i]
+            except KeyError:
+                pass
+
+            t = self.tokens[i]
+            if op is None:
+                # no operation at that index, just dump token
+                buf.write(t.text)
+                i += 1 # move to next token
+
+            else:
+                i = op.execute(buf) # execute operation and skip
+
+        # Include stuff after end if it's the last index in the buffer.
+        # So, if they did an insertAfter(lastValidIndex, "foo"), include
+        # foo if end==lastValidIndex.
+        if end == len(self.tokens) - 1:
+            # Scan any remaining operations after the last token; they
+            # should be included (they will be inserts).
+            for i in sorted(indexToOp.keys()):
+                op = indexToOp[i]
+                if op.index >= len(self.tokens)-1:
+                    buf.write(op.text)
+
+        return buf.getvalue()
+
+    __str__ = toString
+
+
+    def reduceToSingleOperationPerIndex(self, rewrites):
+        """
+        We need to combine operations and report invalid operations (like
+        overlapping replaces that are not completely nested).  Inserts to
+        same index need to be combined etc...   Here are the cases:
+
+        I.i.u I.j.v                           leave alone, nonoverlapping
+        I.i.u I.i.v                           combine: Iivu
+
+        R.i-j.u R.x-y.v | i-j in x-y          delete first R
+        R.i-j.u R.i-j.v                       delete first R
+        R.i-j.u R.x-y.v | x-y in i-j          ERROR
+        R.i-j.u R.x-y.v | boundaries overlap  ERROR
+
+        I.i.u R.x-y.v   | i in x-y            delete I
+        I.i.u R.x-y.v   | i not in x-y        leave alone, nonoverlapping
+        R.x-y.v I.i.u   | i in x-y            ERROR
+        R.x-y.v I.x.u                         R.x-y.uv (combine, delete I)
+        R.x-y.v I.i.u   | i not in x-y        leave alone, nonoverlapping
+
+        I.i.u = insert u before op @ index i
+        R.x-y.u = replace x-y indexed tokens with u
+
+        First we need to examine replaces.  For any replace op:
+
+          1. wipe out any insertions before op within that range.
+          2. Drop any replace op before that is contained completely within
+             that range.
+          3. Throw exception upon boundary overlap with any previous replace.
+
+        Then we can deal with inserts:
+
+          1. for any inserts to same index, combine even if not adjacent.
+          2. for any prior replace with same left boundary, combine this
+             insert with replace and delete this replace.
+          3. throw exception if index in same range as previous replace
+
+        Don't actually delete; make op null in list. Easier to walk list.
+        Later we can throw as we add to index -> op map.
+
+        Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+        inserted stuff would be before the replace range.  But, if you
+        add tokens in front of a method body '{' and then delete the method
+        body, I think the stuff before the '{' you added should disappear too.
+
+        Return a map from token index to operation.
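+
+        A small worked example in the notation above (hypothetical ops):
+        given [I.2."a", R.2-4."b", I.2."c"], the replace walk wipes
+        I.2."a" (index 2 lies within 2-4); the insert walk then combines
+        I.2."c" with the replace sharing its left boundary, leaving the
+        single op R.2-4."cb" in the returned map.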
+        """
+        
+        # WALK REPLACES
+        for i, rop in enumerate(rewrites):
+            if rop is None:
+                continue
+
+            if not isinstance(rop, ReplaceOp):
+                continue
+
+            # Wipe prior inserts within range
+            for j, iop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
+                if iop.index >= rop.index and iop.index <= rop.lastIndex:
+                    rewrites[j] = None  # delete insert as it's a no-op.
+
+            # Drop any prior replaces contained within
+            for j, prevRop in self.getKindOfOps(rewrites, ReplaceOp, i):
+                if (prevRop.index >= rop.index
+                    and prevRop.lastIndex <= rop.lastIndex):
+                    rewrites[j] = None  # delete replace as it's a no-op.
+                    continue
+
+                # throw exception unless disjoint or identical
+                disjoint = (prevRop.lastIndex < rop.index
+                            or prevRop.index > rop.lastIndex)
+                same = (prevRop.index == rop.index
+                        and prevRop.lastIndex == rop.lastIndex)
+                if not disjoint and not same:
+                    raise ValueError(
+                        "replace op boundaries of %s overlap with previous %s"
+                        % (rop, prevRop))
+
+        # WALK INSERTS
+        for i, iop in enumerate(rewrites):
+            if iop is None:
+                continue
+
+            if not isinstance(iop, InsertBeforeOp):
+                continue
+
+            # combine current insert with prior if any at same index
+            for j, prevIop in self.getKindOfOps(rewrites, InsertBeforeOp, i):
+                if prevIop.index == iop.index: # combine objects
+                    # convert to strings...we're in process of toString'ing
+                    # whole token buffer so no lazy eval issue with any
+                    # templates
+                    iop.text = self.catOpText(iop.text, prevIop.text)
+                    rewrites[j] = None  # delete redundant prior insert
+
+            # look for replaces where iop.index is in range; error
+            for j, rop in self.getKindOfOps(rewrites, ReplaceOp, i):
+                if iop.index == rop.index:
+                    rop.text = self.catOpText(iop.text, rop.text)
+                    rewrites[i] = None  # delete current insert
+                    continue
+
+                if iop.index >= rop.index and iop.index <= rop.lastIndex:
+                    raise ValueError(
+                        "insert op %s within boundaries of previous %s"
+                        % (iop, rop))
+        
+        m = {}
+        for i, op in enumerate(rewrites):
+            if op is None:
+                continue # ignore deleted ops
+
+            assert op.index not in m, "should only be one op per index"
+            m[op.index] = op
+
+        return m
+
+
+    def catOpText(self, a, b):
+        x = ""
+        y = ""
+        if a is not None:
+            x = a
+        if b is not None:
+            y = b
+        return x + y
+
+
+    def getKindOfOps(self, rewrites, kind, before=None):
+        if before is None:
+            before = len(rewrites)
+        elif before > len(rewrites):
+            before = len(rewrites)
+
+        for i, op in enumerate(rewrites[:before]):
+            if op is None:
+                # ignore deleted
+                continue
+            if op.__class__ == kind:
+                yield i, op
+
+
+    def toDebugString(self, start=None, end=None):
+        if start is None:
+            start = self.MIN_TOKEN_INDEX
+        if end is None:
+            end = self.size() - 1
+
+        buf = StringIO()
+        i = start
+        while i >= self.MIN_TOKEN_INDEX and i <= end and i < len(self.tokens):
+            buf.write(str(self.get(i)))
+            i += 1
+
+        return buf.getvalue()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/tokens.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,416 @@
+"""ANTLR3 runtime package"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+from antlr3.constants import EOF, DEFAULT_CHANNEL, INVALID_TOKEN_TYPE
+
+############################################################################
+#
+# basic token interface
+#
+############################################################################
+
+class Token(object):
+    """@brief Abstract token baseclass."""
+
+    def getText(self):
+        """@brief Get the text of the token.
+
+        Using setter/getter methods is deprecated. Use o.text instead.
+        """
+        raise NotImplementedError
+    
+    def setText(self, text):
+        """@brief Set the text of the token.
+
+        Using setter/getter methods is deprecated. Use o.text instead.
+        """
+        raise NotImplementedError
+
+
+    def getType(self):
+        """@brief Get the type of the token.
+
+        Using setter/getter methods is deprecated. Use o.type instead."""
+
+        raise NotImplementedError
+    
+    def setType(self, ttype):
+        """@brief Get the type of the token.
+
+        Using setter/getter methods is deprecated. Use o.type instead."""
+
+        raise NotImplementedError
+    
+    
+    def getLine(self):
+        """@brief Get the line number on which this token was matched
+
+        Lines are numbered 1..n
+        
+        Using setter/getter methods is deprecated. Use o.line instead."""
+
+        raise NotImplementedError
+    
+    def setLine(self, line):
+        """@brief Set the line number on which this token was matched
+
+        Using setter/getter methods is deprecated. Use o.line instead."""
+
+        raise NotImplementedError
+    
+    
+    def getCharPositionInLine(self):
+        """@brief Get the column of the tokens first character,
+        
+        Columns are numbered 0..n-1
+        
+        Using setter/getter methods is deprecated. Use o.charPositionInLine instead."""
+
+        raise NotImplementedError
+    
+    def setCharPositionInLine(self, pos):
+        """@brief Set the column of the tokens first character,
+
+        Using setter/getter methods is deprecated. Use o.charPositionInLine instead."""
+
+        raise NotImplementedError
+    
+
+    def getChannel(self):
+        """@brief Get the channel of the token
+
+        Using setter/getter methods is deprecated. Use o.channel instead."""
+
+        raise NotImplementedError
+    
+    def setChannel(self, channel):
+        """@brief Set the channel of the token
+
+        Using setter/getter methods is deprecated. Use o.channel instead."""
+
+        raise NotImplementedError
+    
+
+    def getTokenIndex(self):
+        """@brief Get the index in the input stream.
+
+        An index from 0..n-1 of the token object in the input stream.
+        This must be valid in order to use the ANTLRWorks debugger.
+        
+        Using setter/getter methods is deprecated. Use o.index instead."""
+
+        raise NotImplementedError
+    
+    def setTokenIndex(self, index):
+        """@brief Set the index in the input stream.
+
+        Using setter/getter methods is deprecated. Use o.index instead."""
+
+        raise NotImplementedError
+
+
+    def getInputStream(self):
+        """@brief From what character stream was this token created.
+
+        You don't have to implement but it's nice to know where a Token
+        comes from if you have include files etc... on the input."""
+
+        raise NotImplementedError
+
+    def setInputStream(self, input):
+        """@brief From what character stream was this token created.
+
+        You don't have to implement but it's nice to know where a Token
+        comes from if you have include files etc... on the input."""
+
+        raise NotImplementedError
+
+
+############################################################################
+#
+# token implementations
+#
+# Token
+# +- CommonToken
+# \- ClassicToken
+#
+############################################################################
+
+class CommonToken(Token):
+    """@brief Basic token implementation.
+
+    This implementation does not copy the text from the input stream upon
+    creation, but keeps start/stop pointers into the stream to avoid
+    unnecessary copy operations.
+
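+    A token can also be built by hand, e.g. in tests (a sketch; the type
+    code 4 is an arbitrary, illustrative value):
+
+        tok = CommonToken(type=4, text='hello')
+        assert tok.text == 'hello'
+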
+    """
+    
+    def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None,
+                 input=None, start=None, stop=None, oldToken=None):
+        Token.__init__(self)
+        
+        if oldToken is not None:
+            self.type = oldToken.type
+            self.line = oldToken.line
+            self.charPositionInLine = oldToken.charPositionInLine
+            self.channel = oldToken.channel
+            self.index = oldToken.index
+            self._text = oldToken._text
+            if isinstance(oldToken, CommonToken):
+                self.input = oldToken.input
+                self.start = oldToken.start
+                self.stop = oldToken.stop
+            
+        else:
+            self.type = type
+            self.input = input
+            self.charPositionInLine = -1 # set to invalid position
+            self.line = 0
+            self.channel = channel
+            
+            # What token number is this from 0..n-1 tokens;
+            # < 0 implies invalid index.
+            self.index = -1
+            
+            # We need to be able to change the text once in a while.  If
+            # this is non-null, then getText should return this.  Note that
+            # start/stop are not affected by changing this.
+            self._text = text
+
+            # The char position into the input buffer where this token starts
+            self.start = start
+
+            # The char position into the input buffer where this token stops
+            # This is the index of the last char, *not* the index after it!
+            self.stop = stop
+
+
+    def getText(self):
+        if self._text is not None:
+            return self._text
+
+        if self.input is None:
+            return None
+        
+        return self.input.substring(self.start, self.stop)
+
+
+    def setText(self, text):
+        """
+        Override the text for this token.  getText() will return this text
+        rather than pulling from the buffer.  Note that this does not mean
+        that start/stop indexes are not valid.  It means that that input
+        was converted to a new string in the token object.
+	"""
+        self._text = text
+
+    text = property(getText, setText)
+
+
+    def getType(self):
+        return self.type 
+
+    def setType(self, ttype):
+        self.type = ttype
+
+    
+    def getLine(self):
+        return self.line
+    
+    def setLine(self, line):
+        self.line = line
+
+
+    def getCharPositionInLine(self):
+        return self.charPositionInLine
+    
+    def setCharPositionInLine(self, pos):
+        self.charPositionInLine = pos
+
+
+    def getChannel(self):
+        return self.channel
+    
+    def setChannel(self, channel):
+        self.channel = channel
+    
+
+    def getTokenIndex(self):
+        return self.index
+    
+    def setTokenIndex(self, index):
+        self.index = index
+
+
+    def getInputStream(self):
+        return self.input
+
+    def setInputStream(self, input):
+        self.input = input
+
+
+    def __str__(self):
+        if self.type == EOF:
+            return "<EOF>"
+
+        channelStr = ""
+        if self.channel > 0:
+            channelStr = ",channel=" + str(self.channel)
+
+        txt = self.text
+        if txt is not None:
+            # escape whitespace so the token prints on one line
+            txt = txt.replace("\n", "\\n")
+            txt = txt.replace("\r", "\\r")
+            txt = txt.replace("\t", "\\t")
+        else:
+            txt = "<no text>"
+
+        return "[@%d,%d:%d=%r,<%d>%s,%d:%d]" % (
+            self.index,
+            self.start, self.stop,
+            txt,
+            self.type, channelStr,
+            self.line, self.charPositionInLine
+            )
+    
+
+class ClassicToken(Token):
+    """@brief Alternative token implementation.
+    
+    A Token object like we'd use in ANTLR 2.x; has an actual string created
+    and associated with this object.  These objects are needed for imaginary
+    tree nodes that have payload objects.  We need to create a Token object
+    that has a string; the tree node will point at this token.  CommonToken
+    has indexes into a char stream and hence cannot be used to introduce
+    new strings.
+    """
+
+    def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL,
+                 oldToken=None
+                 ):
+        Token.__init__(self)
+        
+        if oldToken is not None:
+            self.text = oldToken.text
+            self.type = oldToken.type
+            self.line = oldToken.line
+            self.charPositionInLine = oldToken.charPositionInLine
+            self.channel = oldToken.channel
+
+        else:
+            # only apply the defaults when not copying an existing token
+            self.text = text
+            self.type = type
+            self.line = None
+            self.charPositionInLine = None
+            self.channel = channel
+
+        self.index = None
+
+
+    def getText(self):
+        return self.text
+
+    def setText(self, text):
+        self.text = text
+
+
+    def getType(self):
+        return self.type 
+
+    def setType(self, ttype):
+        self.type = ttype
+
+    
+    def getLine(self):
+        return self.line
+    
+    def setLine(self, line):
+        self.line = line
+
+
+    def getCharPositionInLine(self):
+        return self.charPositionInLine
+    
+    def setCharPositionInLine(self, pos):
+        self.charPositionInLine = pos
+
+
+    def getChannel(self):
+        return self.channel
+    
+    def setChannel(self, channel):
+        self.channel = channel
+    
+
+    def getTokenIndex(self):
+        return self.index
+    
+    def setTokenIndex(self, index):
+        self.index = index
+
+
+    def getInputStream(self):
+        return None
+
+    def setInputStream(self, input):
+        pass
+
+
+    def toString(self):
+        channelStr = ""
+        if self.channel > 0:
+            channelStr = ",channel=" + str(self.channel)
+            
+        txt = self.text
+        if txt is None:
+            txt = "<no text>"
+
+        return "[@%r,%r,<%r>%s,%r:%r]" % (self.index,
+                                          txt,
+                                          self.type,
+                                          channelStr,
+                                          self.line,
+                                          self.charPositionInLine
+                                          )
+    
+
+    __str__ = toString
+    __repr__ = toString
+
+
+
+EOF_TOKEN = CommonToken(type=EOF)
+
+INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
+
+# In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR
+# will avoid creating a token for this symbol and try to fetch another.
+SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/tree.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,2448 @@
+""" @package antlr3.tree
+@brief ANTLR3 runtime package, tree module
+
+This module contains all support classes for AST construction and tree parsers.
+
+"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+# lots of docstrings are missing, don't complain for now...
+# pylint: disable-msg=C0111
+
+from antlr3.constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE
+from antlr3.recognizers import BaseRecognizer, RuleReturnScope
+from antlr3.streams import IntStream
+from antlr3.tokens import CommonToken, Token, INVALID_TOKEN
+from antlr3.exceptions import MismatchedTreeNodeException, \
+     MissingTokenException, UnwantedTokenException, MismatchedTokenException, \
+     NoViableAltException
+
+
+############################################################################
+#
+# tree related exceptions
+#
+############################################################################
+
+
+class RewriteCardinalityException(RuntimeError):
+    """
+    @brief Base class for all exceptions thrown during AST rewrite construction.
+
+    This signifies a case where the cardinality of two or more elements
+    in a subrule are different: (ID INT)+ where |ID|!=|INT|
+    """
+
+    def __init__(self, elementDescription):
+        RuntimeError.__init__(self, elementDescription)
+
+        self.elementDescription = elementDescription
+
+
+    def getMessage(self):
+        return self.elementDescription
+
+
+class RewriteEarlyExitException(RewriteCardinalityException):
+    """@brief No elements within a (...)+ in a rewrite rule"""
+
+    def __init__(self, elementDescription=None):
+        RewriteCardinalityException.__init__(self, elementDescription)
+
+
+class RewriteEmptyStreamException(RewriteCardinalityException):
+    """
+    @brief Ref to ID or expr but no tokens in ID stream or subtrees in expr stream
+    """
+
+    pass
+
+
+############################################################################
+#
+# basic Tree and TreeAdaptor interfaces
+#
+############################################################################
+
+class Tree(object):
+    """
+    @brief Abstract baseclass for tree nodes.
+    
+    What does a tree look like?  ANTLR has a number of support classes
+    such as CommonTreeNodeStream that work on these kinds of trees.  You
+    don't have to make your trees implement this interface, but if you do,
+    you'll be able to use more support code.
+
+    NOTE: When constructing trees, ANTLR can build any kind of tree; it can
+    even use Token objects as trees if you add a child list to your tokens.
+    
+    This is a tree node without any payload; just navigation and factory stuff.
+    """
+
+
+    def getChild(self, i):
+        raise NotImplementedError
+    
+
+    def getChildCount(self):
+        raise NotImplementedError
+    
+
+    def getParent(self):
+        """Tree tracks parent and child index now > 3.0"""
+
+        raise NotImplementedError
+    
+    def setParent(self, t):
+        """Tree tracks parent and child index now > 3.0"""
+
+        raise NotImplementedError
+    
+
+    def getChildIndex(self):
+        """This node is what child index? 0..n-1"""
+
+        raise NotImplementedError
+        
+    def setChildIndex(self, index):
+        """This node is what child index? 0..n-1"""
+
+        raise NotImplementedError
+        
+
+    def freshenParentAndChildIndexes(self):
+        """Set the parent and child index values for all children"""
+        
+        raise NotImplementedError
+
+        
+    def addChild(self, t):
+        """
+        Add t as a child to this node.  If t is null, do nothing.  If t
+        is nil, add all children of t to this' children.
+        """
+
+        raise NotImplementedError
+    
+
+    def setChild(self, i, t):
+        """Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
+
+        raise NotImplementedError
+
+            
+    def deleteChild(self, i):
+        raise NotImplementedError
+        
+ 
+    def replaceChildren(self, startChildIndex, stopChildIndex, t):
+        """
+        Delete children from start to stop and replace with t even if t is
+        a list (nil-root tree).  num of children can increase or decrease.
+        For huge child lists, inserting children can force walking rest of
+        children to set their childindex; could be slow.
+        """
+
+        raise NotImplementedError
+
+
+    def isNil(self):
+        """
+        Indicates the node is a nil node but may still have children, meaning
+        the tree is a flat list.
+        """
+
+        raise NotImplementedError
+    
+
+    def getTokenStartIndex(self):
+        """
+        What is the smallest token index (indexing from 0) for this node
+        and its children?
+        """
+
+        raise NotImplementedError
+
+
+    def setTokenStartIndex(self, index):
+        raise NotImplementedError
+
+
+    def getTokenStopIndex(self):
+        """
+        What is the largest token index (indexing from 0) for this node
+        and its children?
+        """
+
+        raise NotImplementedError
+
+
+    def setTokenStopIndex(self, index):
+        raise NotImplementedError
+
+
+    def dupNode(self):
+        raise NotImplementedError
+    
+    
+    def getType(self):
+        """Return a token type; needed for tree parsing."""
+
+        raise NotImplementedError
+    
+
+    def getText(self):
+        raise NotImplementedError
+    
+
+    def getLine(self):
+        """
+        In case we don't have a token payload, what is the line for errors?
+        """
+
+        raise NotImplementedError
+    
+
+    def getCharPositionInLine(self):
+        raise NotImplementedError
+
+
+    def toStringTree(self):
+        raise NotImplementedError
+
+
+    def toString(self):
+        raise NotImplementedError
+
+
+
+class TreeAdaptor(object):
+    """
+    @brief Abstract baseclass for tree adaptors.
+    
+    How to create and navigate trees.  Rather than have a separate factory
+    and adaptor, I've merged them.  Makes sense to encapsulate.
+
+    This takes the place of the tree construction code generated in the
+    generated code in 2.x and the ASTFactory.
+
+    I do not need to know the type of a tree at all so they are all
+    generic Objects.  This may increase the amount of typecasting needed. :(
+    """
+    
+    # C o n s t r u c t i o n
+
+    def createWithPayload(self, payload):
+        """
+        Create a tree node from Token object; for CommonTree type trees,
+        then the token just becomes the payload.  This is the most
+        common create call.
+
+        Override if you want another kind of node to be built.
+        """
+
+        raise NotImplementedError
+    
+
+    def dupNode(self, treeNode):
+        """Duplicate a single tree node.
+
+        Override if you want another kind of node to be built."""
+
+        raise NotImplementedError
+
+
+    def dupTree(self, tree):
+        """Duplicate tree recursively, using dupNode() for each node"""
+
+        raise NotImplementedError
+
+
+    def nil(self):
+        """
+        Return a nil node (an empty but non-null node) that can hold
+        a list of elements as the children.  If you want a flat tree (a list)
+        use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
+        """
+
+        raise NotImplementedError
+
+
+    def errorNode(self, input, start, stop, exc):
+        """
+        Return a tree node representing an error.  This node records the
+        tokens consumed during error recovery.  The start token indicates the
+        input symbol at which the error was detected.  The stop token indicates
+        the last symbol consumed during recovery.
+
+        You must specify the input stream so that the erroneous text can
+        be packaged up in the error node.  The exception could be useful
+        to some applications; default implementation stores ptr to it in
+        the CommonErrorNode.
+
+        This only makes sense during token parsing, not tree parsing.
+        Tree parsing should happen only when parsing and tree construction
+        succeed.
+        """
+
+        raise NotImplementedError
+
+
+    def isNil(self, tree):
+        """Is tree considered a nil node used to make lists of child nodes?"""
+
+        raise NotImplementedError
+
+
+    def addChild(self, t, child):
+        """
+        Add a child to the tree t.  If child is a flat tree (a list), make all
+        in list children of t.  Warning: if t has no children, but child does
+        and child isNil then you can decide it is ok to move children to t via
+        t.children = child.children; i.e., without copying the array.  Just
+        make sure that this is consistent with how the user will build
+        ASTs. Do nothing if t or child is null.
+        """
+
+        raise NotImplementedError
+
+
+    def becomeRoot(self, newRoot, oldRoot):
+        """
+        If oldRoot is a nil root, just copy or move the children to newRoot.
+        If not a nil root, make oldRoot a child of newRoot.
+        
+           old=^(nil a b c), new=r yields ^(r a b c)
+           old=^(a b c), new=r yields ^(r ^(a b c))
+
+        If newRoot is a nil-rooted single child tree, use the single
+        child as the new root node.
+
+           old=^(nil a b c), new=^(nil r) yields ^(r a b c)
+           old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
+
+        If oldRoot was null, it's ok, just return newRoot (even if isNil).
+
+           old=null, new=r yields r
+           old=null, new=^(nil r) yields ^(nil r)
+
+        Return newRoot.  Throw an exception if newRoot is not a
+        simple node or nil root with a single child node--it must be a root
+        node.  If newRoot is ^(nil x) return x as newRoot.
+
+        Be advised that it's ok for newRoot to point at oldRoot's
+        children; i.e., you don't have to copy the list.  We are
+        constructing these nodes so we should have this control for
+        efficiency.
+        """
+
+        raise NotImplementedError
+
+
+    def rulePostProcessing(self, root):
+        """
+        Given the root of the subtree created for this rule, post process
+        it to do any simplifications or whatever you want.  A required
+        behavior is to convert ^(nil singleSubtree) to singleSubtree
+        as the setting of start/stop indexes relies on a single non-nil root
+        for non-flat trees.
+
+        Flat trees such as for lists like "idlist : ID+ ;" are left alone
+        unless there is only one ID.  For a list, the start/stop indexes
+        are set in the nil node.
+
+        This method is executed after all rule tree construction and right
+        before setTokenBoundaries().
+        """
+
+        raise NotImplementedError
+
+
+    def getUniqueID(self, node):
+        """For identifying trees.
+
+        How to identify nodes so we can say "add node to a prior node"?
+        Even becomeRoot is an issue.  Use System.identityHashCode(node)
+        usually.
+        """
+
+        raise NotImplementedError
+
+
+    # R e w r i t e  R u l e s
+
+    def createFromToken(self, tokenType, fromToken, text=None):
+        """
+        Create a new node derived from a token, with a new token type and
+        (optionally) new text.
+
+        This is invoked from an imaginary node ref on right side of a
+        rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"].
+
+        This should invoke createToken(Token).
+        """
+
+        raise NotImplementedError
+
+
+    def createFromType(self, tokenType, text):
+        """Create a new node derived from a token, with a new token type.
+
+        This is invoked from an imaginary node ref on right side of a
+        rewrite rule as IMAG["IMAG"].
+
+        This should invoke createToken(int,String).
+        """
+
+        raise NotImplementedError
+
+
+    # C o n t e n t
+
+    def getType(self, t):
+        """For tree parsing, I need to know the token type of a node"""
+
+        raise NotImplementedError
+
+
+    def setType(self, t, type):
+        """Node constructors can set the type of a node"""
+
+        raise NotImplementedError
+
+
+    def getText(self, t):
+        raise NotImplementedError
+
+    def setText(self, t, text):
+        """Node constructors can set the text of a node"""
+
+        raise NotImplementedError
+
+
+    def getToken(self, t):
+        """Return the token object from which this node was created.
+
+        Currently used only for printing an error message.
+        The error display routine in BaseRecognizer needs to
+        display where in the input the error occurred. If your
+        tree implementation does not store information that can
+        lead you to the token, you can create a token filled with
+        the appropriate information and pass that back.  See
+        BaseRecognizer.getErrorMessage().
+        """
+
+        raise NotImplementedError
+
+
+    def setTokenBoundaries(self, t, startToken, stopToken):
+        """
+        Where are the bounds in the input token stream for this node and
+        all children?  Each rule that creates AST nodes will call this
+        method right before returning.  Flat trees (i.e., lists) will
+        still usually have a nil root node just to hold the children list.
+        That node would contain the start/stop indexes then.
+        """
+
+        raise NotImplementedError
+
+
+    def getTokenStartIndex(self, t):
+        """
+        Get the token start index for this subtree; return -1 if no such index
+        """
+
+        raise NotImplementedError
+
+        
+    def getTokenStopIndex(self, t):
+        """
+        Get the token stop index for this subtree; return -1 if no such index
+        """
+
+        raise NotImplementedError
+        
+
+    # N a v i g a t i o n  /  T r e e  P a r s i n g
+
+    def getChild(self, t, i):
+        """Get a child 0..n-1 node"""
+
+        raise NotImplementedError
+
+
+    def setChild(self, t, i, child):
+        """Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
+
+        raise NotImplementedError
+
+
+    def deleteChild(self, t, i):
+        """Remove ith child and shift children down from right."""
+        
+        raise NotImplementedError
+
+
+    def getChildCount(self, t):
+        """How many children?  If 0, then this is a leaf node"""
+
+        raise NotImplementedError
+
+
+    def getParent(self, t):
+        """
+        Who is the parent node of this node; if null, implies node is root.
+        If your node type doesn't handle this, it's ok but the tree rewrites
+        in tree parsers need this functionality.
+        """
+        
+        raise NotImplementedError
+
+
+    def setParent(self, t, parent):
+        """
+        Who is the parent node of this node; if null, implies node is root.
+        If your node type doesn't handle this, it's ok but the tree rewrites
+        in tree parsers need this functionality.
+        """
+
+        raise NotImplementedError
+
+
+    def getChildIndex(self, t):
+        """
+        What index is this node in the child list? Range: 0..n-1
+        If your node type doesn't handle this, it's ok but the tree rewrites
+        in tree parsers need this functionality.
+        """
+
+        raise NotImplementedError
+
+        
+    def setChildIndex(self, t, index):
+        """
+        What index is this node in the child list? Range: 0..n-1
+        If your node type doesn't handle this, it's ok but the tree rewrites
+        in tree parsers need this functionality.
+        """
+
+        raise NotImplementedError
+
+
+    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
+        """
+        Replace from start to stop child index of parent with t, which might
+        be a list.  Number of children may be different
+        after this call.
+
+        If parent is null, don't do anything; must be at root of overall tree.
+        Can't replace whatever points to the parent externally.  Do nothing.
+        """
+
+        raise NotImplementedError
+
+
+    # Misc
+
+    def create(self, *args):
+        """
+        Deprecated, use createWithPayload, createFromToken or createFromType.
+
+        This method only exists to mimic the Java interface of TreeAdaptor.
+        
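+        The dispatch, as implemented by the isinstance checks below
+        (a summary only; no new behavior):
+
+            create(token)              -> createWithPayload(token)
+            create(ttype, token)       -> createFromToken(ttype, token)
+            create(ttype, token, text) -> createFromToken(ttype, token, text)
+            create(ttype, text)        -> createFromType(ttype, text)
+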
+        """
+
+        if len(args) == 1 and isinstance(args[0], Token):
+            # Object create(Token payload);
+##             warnings.warn(
+##                 "Using create() is deprecated, use createWithPayload()",
+##                 DeprecationWarning,
+##                 stacklevel=2
+##                 )
+            return self.createWithPayload(args[0])
+
+        if (len(args) == 2
+            and isinstance(args[0], (int, long))
+            and isinstance(args[1], Token)
+            ):
+            # Object create(int tokenType, Token fromToken);
+##             warnings.warn(
+##                 "Using create() is deprecated, use createFromToken()",
+##                 DeprecationWarning,
+##                 stacklevel=2
+##                 )
+            return self.createFromToken(args[0], args[1])
+
+        if (len(args) == 3
+            and isinstance(args[0], (int, long))
+            and isinstance(args[1], Token)
+            and isinstance(args[2], basestring)
+            ):
+            # Object create(int tokenType, Token fromToken, String text);
+##             warnings.warn(
+##                 "Using create() is deprecated, use createFromToken()",
+##                 DeprecationWarning,
+##                 stacklevel=2
+##                 )
+            return self.createFromToken(args[0], args[1], args[2])
+
+        if (len(args) == 2
+            and isinstance(args[0], (int, long))
+            and isinstance(args[1], basestring)
+            ):
+            # Object create(int tokenType, String text);
+##             warnings.warn(
+##                 "Using create() is deprecated, use createFromType()",
+##                 DeprecationWarning,
+##                 stacklevel=2
+##                 )
+            return self.createFromType(args[0], args[1])
+
+        raise TypeError(
+            "No create method with this signature found: %s"
+            % (', '.join(type(v).__name__ for v in args))
+            )
+    
+
+############################################################################
+#
+# base implementation of Tree and TreeAdaptor
+#
+# Tree
+# \- BaseTree
+#
+# TreeAdaptor
+# \- BaseTreeAdaptor
+#
+############################################################################
+
+
+class BaseTree(Tree):
+    """
+    @brief A generic tree implementation with no payload.
+
+    You must subclass to
+    actually have any user data.  ANTLR v3 uses a list of children approach
+    instead of the child-sibling approach in v2.  A flat tree (a list) is
+    an empty node whose children represent the list.  An empty, but
+    non-null node is called "nil".
+    """
+
+    # BaseTree is abstract, no need to complain about not implemented abstract
+    # methods
+    # pylint: disable-msg=W0223
+    
+    def __init__(self, node=None):
+        """
+        Creating a new node from an existing node does nothing for BaseTree,
+        as there are no fields other than the children list, which cannot
+        be copied as the children are not considered part of this node. 
+        """
+        
+        Tree.__init__(self)
+        self.children = []
+        self.parent = None
+        self.childIndex = 0
+        
+
+    def getChild(self, i):
+        try:
+            return self.children[i]
+        except IndexError:
+            return None
+
+
+    def getChildren(self):
+        """@brief Get the children internal List
+
+        Note that if you directly mess with
+        the list, do so at your own risk.
+        """
+        
+        # FIXME: mark as deprecated
+        return self.children
+
+
+    def getFirstChildWithType(self, treeType):
+        for child in self.children:
+            if child.getType() == treeType:
+                return child
+
+        return None
+
+
+    def getChildCount(self):
+        return len(self.children)
+
+
+    def addChild(self, childTree):
+        """Add t as child of this node.
+
+        Warning: if t has no children, but child does
+        and child isNil then this routine moves children to t via
+        t.children = child.children; i.e., without copying the array.
+        """
+
+        # this implementation is much simpler and probably less efficient
+        # than the mumbo-jumbo that Ter did for the Java runtime.
+        
+        if childTree is None:
+            return
+
+        if childTree.isNil():
+            # t is an empty node possibly with children
+
+            if self.children is childTree.children:
+                raise ValueError("attempt to add child list to itself")
+
+            # fix parent pointer and childIndex for new children
+            for idx, child in enumerate(childTree.children):
+                child.parent = self
+                child.childIndex = len(self.children) + idx
+                
+            self.children += childTree.children
+
+        else:
+            # child is not nil (don't care about children)
+            self.children.append(childTree)
+            childTree.parent = self
+            childTree.childIndex = len(self.children) - 1
+
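+    # A minimal, illustrative sketch of the nil-splicing behavior above
+    # (hypothetical; assumes Token instances tokA, tokB and the CommonTree
+    # subclass defined later in this module):
+    #
+    #   root = CommonTree(tokA)          # a regular node
+    #   flat = CommonTree(None)          # a nil node, i.e. a flat list
+    #   flat.addChild(CommonTree(tokB))
+    #   root.addChild(flat)              # flat's children are spliced in,
+    #                                    # so root's only child is now B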
+
+    def addChildren(self, children):
+        """Add all elements of kids list as children of this node"""
+
+        self.children += children
+
+
+    def setChild(self, i, t):
+        if t is None:
+            return
+
+        if t.isNil():
+            raise ValueError("Can't set single child to a list")
+        
+        self.children[i] = t
+        t.parent = self
+        t.childIndex = i
+        
+
+    def deleteChild(self, i):
+        killed = self.children[i]
+        
+        del self.children[i]
+        
+        # walk rest and decrement their child indexes
+        for idx, child in enumerate(self.children[i:]):
+            child.childIndex = i + idx
+            
+        return killed
+
+    
+    def replaceChildren(self, startChildIndex, stopChildIndex, newTree):
+        """
+        Delete children from start to stop and replace with t even if t is
+        a list (nil-root tree).  num of children can increase or decrease.
+        For huge child lists, inserting children can force walking rest of
+        children to set their childIndex; could be slow.
+        """
+
+        if (startChildIndex >= len(self.children)
+            or stopChildIndex >= len(self.children)
+            ):
+            raise IndexError("indexes invalid")
+
+        replacingHowMany = stopChildIndex - startChildIndex + 1
+
+        # normalize to a list of children to add: newChildren
+        if newTree.isNil():
+            newChildren = newTree.children
+
+        else:
+            newChildren = [newTree]
+
+        replacingWithHowMany = len(newChildren)
+        delta = replacingHowMany - replacingWithHowMany
+        
+        
+        if delta == 0:
+            # if same number of nodes, do direct replace
+            for idx, child in enumerate(newChildren):
+                self.children[idx + startChildIndex] = child
+                child.parent = self
+                child.childIndex = idx + startChildIndex
+
+        else:
+            # length of children changes...
+
+            # ...delete replaced segment...
+            del self.children[startChildIndex:stopChildIndex+1]
+
+            # ...insert new segment...
+            self.children[startChildIndex:startChildIndex] = newChildren
+
+            # ...and fix indices
+            self.freshenParentAndChildIndexes(startChildIndex)
+            
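+    # Illustrative sketch (hypothetical nodes): with children [a, b, c],
+    # replacing indexes 1..1 with a nil-rooted list ^(nil x y) leaves
+    # [a, x, y, c] and re-freshens parent/child indexes from index 1 on:
+    #
+    #   parent.replaceChildren(1, 1, nilRootedXY)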
+
+    def isNil(self):
+        return False
+
+
+    def freshenParentAndChildIndexes(self, offset=0):
+        for idx, child in enumerate(self.children[offset:]):
+            child.childIndex = idx + offset
+            child.parent = self
+
+
+    def sanityCheckParentAndChildIndexes(self, parent=None, i=-1):
+        if parent != self.parent:
+            raise ValueError(
+                "parents don't match; expected %r found %r"
+                % (parent, self.parent)
+                )
+        
+        if i != self.childIndex:
+            raise ValueError(
+                "child indexes don't match; expected %d found %d"
+                % (i, self.childIndex)
+                )
+
+        for idx, child in enumerate(self.children):
+            child.sanityCheckParentAndChildIndexes(self, idx)
+
+
+    def getChildIndex(self):
+        """BaseTree doesn't track child indexes."""
+        
+        return 0
+
+
+    def setChildIndex(self, index):
+        """BaseTree doesn't track child indexes."""
+
+        pass
+    
+
+    def getParent(self):
+        """BaseTree doesn't track parent pointers."""
+
+        return None
+
+    def setParent(self, t):
+        """BaseTree doesn't track parent pointers."""
+
+        pass
+
+
+    def toStringTree(self):
+        """Print out a whole tree not just a node"""
+
+        if len(self.children) == 0:
+            return self.toString()
+
+        buf = []
+        if not self.isNil():
+            buf.append('(')
+            buf.append(self.toString())
+            buf.append(' ')
+
+        for i, child in enumerate(self.children):
+            if i > 0:
+                buf.append(' ')
+            buf.append(child.toStringTree())
+
+        if not self.isNil():
+            buf.append(')')
+
+        return ''.join(buf)
+
+
+    def getLine(self):
+        return 0
+
+
+    def getCharPositionInLine(self):
+        return 0
+
+
+    def toString(self):
+        """Override to say how a node (not a tree) should look as text"""
+
+        raise NotImplementedError
+
+
+
+class BaseTreeAdaptor(TreeAdaptor):
+    """
+    @brief A TreeAdaptor that works with any Tree implementation.
+    """
+    
+    # BaseTreeAdaptor is abstract, no need to complain about not implemented
+    # abstract methods
+    # pylint: disable-msg=W0223
+    
+    def nil(self):
+        return self.createWithPayload(None)
+
+
+    def errorNode(self, input, start, stop, exc):
+        """
+        create tree node that holds the start and stop tokens associated
+        with an error.
+
+        If you specify your own kind of tree nodes, you will likely have to
+        override this method. CommonTree returns Token.INVALID_TOKEN_TYPE
+        if no token payload but you might have to set token type for diff
+        node type.
+        """
+        
+        return CommonErrorNode(input, start, stop, exc)
+    
+
+    def isNil(self, tree):
+        return tree.isNil()
+
+
+    def dupTree(self, t, parent=None):
+        """
+        This is generic in the sense that it will work with any kind of
+        tree (not just Tree interface).  It invokes the adaptor routines
+        not the tree node routines to do the construction.
+        """
+
+        if t is None:
+            return None
+
+        newTree = self.dupNode(t)
+        
+        # ensure new subtree root has parent/child index set
+
+        # same index in new tree
+        self.setChildIndex(newTree, self.getChildIndex(t))
+        
+        self.setParent(newTree, parent)
+
+        for i in range(self.getChildCount(t)):
+            child = self.getChild(t, i)
+            newSubTree = self.dupTree(child, t)
+            self.addChild(newTree, newSubTree)
+
+        return newTree
+
+
+    def addChild(self, tree, child):
+        """
+        Add a child to the tree t.  If child is a flat tree (a list), make all
+        in list children of t.  Warning: if t has no children, but child does
+        and child isNil then you can decide it is ok to move children to t via
+        t.children = child.children; i.e., without copying the array.  Just
+        make sure that this is consistent with how the user will build
+        ASTs.
+        """
+
+        #if isinstance(child, Token):
+        #    child = self.createWithPayload(child)
+        
+        if tree is not None and child is not None:
+            tree.addChild(child)
+
+
+    def becomeRoot(self, newRoot, oldRoot):
+        """
+        If oldRoot is a nil root, just copy or move the children to newRoot.
+        If not a nil root, make oldRoot a child of newRoot.
+
+          old=^(nil a b c), new=r yields ^(r a b c)
+          old=^(a b c), new=r yields ^(r ^(a b c))
+
+        If newRoot is a nil-rooted single child tree, use the single
+        child as the new root node.
+
+          old=^(nil a b c), new=^(nil r) yields ^(r a b c)
+          old=^(a b c), new=^(nil r) yields ^(r ^(a b c))
+
+        If oldRoot was null, it's ok, just return newRoot (even if isNil).
+
+          old=null, new=r yields r
+          old=null, new=^(nil r) yields ^(nil r)
+
+        Return newRoot.  Throw an exception if newRoot is not a
+        simple node or nil root with a single child node--it must be a root
+        node.  If newRoot is ^(nil x) return x as newRoot.
+
+        Be advised that it's ok for newRoot to point at oldRoot's
+        children; i.e., you don't have to copy the list.  We are
+        constructing these nodes so we should have this control for
+        efficiency.
+        """
+
+        if isinstance(newRoot, Token):
+            newRoot = self.create(newRoot)
+
+        if oldRoot is None:
+            return newRoot
+        
+        if not isinstance(newRoot, CommonTree):
+            newRoot = self.createWithPayload(newRoot)
+
+        # handle ^(nil real-node)
+        if newRoot.isNil():
+            nc = newRoot.getChildCount()
+            if nc == 1:
+                newRoot = newRoot.getChild(0)
+                
+            elif nc > 1:
+                # TODO: make tree run time exceptions hierarchy
+                raise RuntimeError("more than one node as root")
+
+        # add oldRoot to newRoot; addChild takes care of case where oldRoot
+        # is a flat list (i.e., nil-rooted tree).  All children of oldRoot
+        # are added to newRoot.
+        newRoot.addChild(oldRoot)
+        return newRoot
+
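+    # Illustrative sketch (PLUS and INT are hypothetical token types;
+    # uses the CommonTreeAdaptor defined later in this module):
+    #
+    #   adaptor = CommonTreeAdaptor()
+    #   root = adaptor.createFromType(PLUS, '+')
+    #   opnds = adaptor.nil()
+    #   adaptor.addChild(opnds, adaptor.createFromType(INT, '1'))
+    #   adaptor.addChild(opnds, adaptor.createFromType(INT, '2'))
+    #   tree = adaptor.becomeRoot(root, opnds)   # yields ^(+ 1 2)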
+
+    def rulePostProcessing(self, root):
+        """Transform ^(nil x) to x and nil to null"""
+        
+        if root is not None and root.isNil():
+            if root.getChildCount() == 0:
+                root = None
+
+            elif root.getChildCount() == 1:
+                root = root.getChild(0)
+                # whoever invokes rule will set parent and child index
+                root.setParent(None)
+                root.setChildIndex(-1)
+
+        return root
+
+
+    def createFromToken(self, tokenType, fromToken, text=None):
+        assert isinstance(tokenType, (int, long)), type(tokenType).__name__
+        assert isinstance(fromToken, Token), type(fromToken).__name__
+        assert text is None or isinstance(text, basestring), type(text).__name__
+
+        fromToken = self.createToken(fromToken)
+        fromToken.type = tokenType
+        if text is not None:
+            fromToken.text = text
+        t = self.createWithPayload(fromToken)
+        return t
+
+
+    def createFromType(self, tokenType, text):
+        assert isinstance(tokenType, (int, long)), type(tokenType).__name__
+        assert isinstance(text, basestring), type(text).__name__
+                          
+        fromToken = self.createToken(tokenType=tokenType, text=text)
+        t = self.createWithPayload(fromToken)
+        return t
+
+
+    def getType(self, t):
+        return t.getType()
+
+
+    def setType(self, t, type):
+        raise RuntimeError("don't know enough about Tree node")
+
+
+    def getText(self, t):
+        return t.getText()
+
+
+    def setText(self, t, text):
+        raise RuntimeError("don't know enough about Tree node")
+
+
+    def getChild(self, t, i):
+        return t.getChild(i)
+
+
+    def setChild(self, t, i, child):
+        t.setChild(i, child)
+
+
+    def deleteChild(self, t, i):
+        return t.deleteChild(i)
+
+
+    def getChildCount(self, t):
+        return t.getChildCount()
+
+
+    def getUniqueID(self, node):
+        return hash(node)
+
+
+    def createToken(self, fromToken=None, tokenType=None, text=None):
+        """
+        Tell me how to create a token for use with imaginary token nodes.
+        For example, there is probably no input symbol associated with imaginary
+        token DECL, but you need to create it as a payload or whatever for
+        the DECL node as in ^(DECL type ID).
+
+        If you care what the token payload objects' type is, you should
+        override this method and any other createToken variant.
+        """
+
+        raise NotImplementedError
+
+
+############################################################################
+#
+# common tree implementation
+#
+# Tree
+# \- BaseTree
+#    \- CommonTree
+#       \- CommonErrorNode
+#
+# TreeAdaptor
+# \- BaseTreeAdaptor
+#    \- CommonTreeAdaptor
+#
+############################################################################
+
+
+class CommonTree(BaseTree):
+    """@brief A tree node that is wrapper for a Token object.
+
+    After 3.0 release
+    while building tree rewrite stuff, it became clear that computing
+    parent and child index is very difficult and cumbersome.  Better to
+    spend the space in every tree node.  If you don't want these extra
+    fields, it's easy to cut them out in your own BaseTree subclass.
+    
+    """
+
+    def __init__(self, payload):
+        BaseTree.__init__(self)
+        
+        # What token indexes bracket all tokens associated with this node
+        # and below?
+        self.startIndex = -1
+        self.stopIndex = -1
+
+        # Who is the parent node of this node; if null, implies node is root
+        self.parent = None
+        
+        # What index is this node in the child list? Range: 0..n-1
+        self.childIndex = -1
+
+        # A single token is the payload
+        if payload is None:
+            self.token = None
+            
+        elif isinstance(payload, CommonTree):
+            self.token = payload.token
+            self.startIndex = payload.startIndex
+            self.stopIndex = payload.stopIndex
+            
+        elif isinstance(payload, Token):
+            self.token = payload
+            
+        else:
+            raise TypeError(type(payload).__name__)
+
+
+
+    def getToken(self):
+        return self.token
+
+
+    def dupNode(self):
+        return CommonTree(self)
+
+
+    def isNil(self):
+        return self.token is None
+
+
+    def getType(self):
+        if self.token is None:
+            return INVALID_TOKEN_TYPE
+
+        return self.token.getType()
+
+    type = property(getType)
+    
+
+    def getText(self):
+        if self.token is None:
+            return None
+        
+        return self.token.text
+
+    text = property(getText)
+    
+
+    def getLine(self):
+        if self.token is None or self.token.getLine() == 0:
+            if self.getChildCount():
+                return self.getChild(0).getLine()
+            else:
+                return 0
+
+        return self.token.getLine()
+
+    line = property(getLine)
+    
+
+    def getCharPositionInLine(self):
+        if self.token is None or self.token.getCharPositionInLine() == -1:
+            if self.getChildCount():
+                return self.getChild(0).getCharPositionInLine()
+            else:
+                return 0
+
+        else:
+            return self.token.getCharPositionInLine()
+
+    charPositionInLine = property(getCharPositionInLine)
+    
+
+    def getTokenStartIndex(self):
+        if self.startIndex == -1 and self.token is not None:
+            return self.token.getTokenIndex()
+        
+        return self.startIndex
+    
+    def setTokenStartIndex(self, index):
+        self.startIndex = index
+
+    tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex)
+
+
+    def getTokenStopIndex(self):
+        if self.stopIndex == -1 and self.token is not None:
+            return self.token.getTokenIndex()
+        
+        return self.stopIndex
+
+    def setTokenStopIndex(self, index):
+        self.stopIndex = index
+
+    tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex)
+
+
+    def getChildIndex(self):
+        #FIXME: mark as deprecated
+        return self.childIndex
+
+
+    def setChildIndex(self, idx):
+        #FIXME: mark as deprecated
+        self.childIndex = idx
+
+
+    def getParent(self):
+        #FIXME: mark as deprecated
+        return self.parent
+
+
+    def setParent(self, t):
+        #FIXME: mark as deprecated
+        self.parent = t
+
+        
+    def toString(self):
+        if self.isNil():
+            return "nil"
+
+        if self.getType() == INVALID_TOKEN_TYPE:
+            return "<errornode>"
+
+        return self.token.text
+
+    __str__ = toString   
+
+
+
+    def toStringTree(self):
+        if not self.children:
+            return self.toString()
+
+        ret = ''
+        if not self.isNil():
+            ret += '(%s ' % (self.toString())
+        
+        ret += ' '.join([child.toStringTree() for child in self.children])
+
+        if not self.isNil():
+            ret += ')'
+
+        return ret
+
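+    # For example (tokens hypothetical): a node A with children B and C
+    # renders as '(A B C)'; a childless node renders as just its text,
+    # and a nil root is omitted, so a flat list prints as 'B C'.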
+
+INVALID_NODE = CommonTree(INVALID_TOKEN)
+
+
+class CommonErrorNode(CommonTree):
+    """A node representing erroneous token range in token stream"""
+
+    def __init__(self, input, start, stop, exc):
+        CommonTree.__init__(self, None)
+
+        if (stop is None or
+            (stop.getTokenIndex() < start.getTokenIndex() and
+             stop.getType() != EOF
+             )
+            ):
+            # sometimes resync does not consume a token (when LT(1) is
+            # in the follow set), so stop will be one to the left of start;
+            # adjust.  Also handle the case where start is the first token
+            # and no token is consumed during recovery; LT(-1) will return
+            # null.
+            stop = start
+
+        self.input = input
+        self.start = start
+        self.stop = stop
+        self.trappedException = exc
+
+
+    def isNil(self):
+        return False
+
+
+    def getType(self):
+        return INVALID_TOKEN_TYPE
+
+
+    def getText(self):
+        if isinstance(self.start, Token):
+            i = self.start.getTokenIndex()
+            j = self.stop.getTokenIndex()
+            if self.stop.getType() == EOF:
+                j = self.input.size()
+
+            badText = self.input.toString(i, j)
+
+        elif isinstance(self.start, Tree):
+            badText = self.input.toString(self.start, self.stop)
+
+        else:
+            # people should subclass if they alter the tree type so this
+            # next one is for sure correct.
+            badText = "<unknown>"
+
+        return badText
+
+
+    def toString(self):
+        if isinstance(self.trappedException, MissingTokenException):
+            return ("<missing type: "
+                    + str(self.trappedException.getMissingType())
+                    + ">")
+
+        elif isinstance(self.trappedException, UnwantedTokenException):
+            return ("<extraneous: "
+                    + str(self.trappedException.getUnexpectedToken())
+                    + ", resync=" + self.getText() + ">")
+
+        elif isinstance(self.trappedException, MismatchedTokenException):
+            return ("<mismatched token: "
+                    + str(self.trappedException.token)
+                    + ", resync=" + self.getText() + ">")
+
+        elif isinstance(self.trappedException, NoViableAltException):
+            return ("<unexpected: "
+                    + str(self.trappedException.token)
+                    + ", resync=" + self.getText() + ">")
+
+        return "<error: "+self.getText()+">"
+
+
+class CommonTreeAdaptor(BaseTreeAdaptor):
+    """
+    @brief A TreeAdaptor that works with any Tree implementation.
+    
+    It provides
+    really just factory methods; all the work is done by BaseTreeAdaptor.
+    If you would like to have different tokens created than ClassicToken
+    objects, you need to override this and then set the parser tree adaptor to
+    use your subclass.
+
+    To get your parser to build nodes of a different type, override
+    create(Token).
+    """
+    
+    def dupNode(self, treeNode):
+        """
+        Duplicate a node.  This is part of the factory;
+        override if you want another kind of node to be built.
+
+        I could use reflection to prevent having to override this
+        but reflection is slow.
+        """
+
+        if treeNode is None:
+            return None
+        
+        return treeNode.dupNode()
+
+
+    def createWithPayload(self, payload):
+        return CommonTree(payload)
+
+
+    def createToken(self, fromToken=None, tokenType=None, text=None):
+        """
+        Tell me how to create a token for use with imaginary token nodes.
+        For example, there is probably no input symbol associated with imaginary
+        token DECL, but you need to create it as a payload or whatever for
+        the DECL node as in ^(DECL type ID).
+
+        If you care what the token payload objects' type is, you should
+        override this method and any other createToken variant.
+        """
+        
+        if fromToken is not None:
+            return CommonToken(oldToken=fromToken)
+
+        return CommonToken(type=tokenType, text=text)
+
+
+    def setTokenBoundaries(self, t, startToken, stopToken):
+        """
+        Track start/stop token for subtree root created for a rule.
+        Only works with Tree nodes.  For rules that match nothing,
+        seems like this will yield start=i and stop=i-1 in a nil node.
+        Might be useful info so I'll not force to be i..i.
+        """
+        
+        if t is None:
+            return
+
+        start = 0
+        stop = 0
+        
+        if startToken is not None:
+            start = startToken.index
+                
+        if stopToken is not None:
+            stop = stopToken.index
+
+        t.setTokenStartIndex(start)
+        t.setTokenStopIndex(stop)
+
+
+    def getTokenStartIndex(self, t):
+        if t is None:
+            return -1
+        return t.getTokenStartIndex()
+
+
+    def getTokenStopIndex(self, t):
+        if t is None:
+            return -1
+        return t.getTokenStopIndex()
+
+
+    def getText(self, t):
+        if t is None:
+            return None
+        return t.getText()
+
+
+    def getType(self, t):
+        if t is None:
+            return INVALID_TOKEN_TYPE
+        
+        return t.getType()
+
+
+    def getToken(self, t):
+        """
+        What is the Token associated with this node?  If
+        you are not using CommonTree, then you must
+        override this in your own adaptor.
+        """
+
+        if isinstance(t, CommonTree):
+            return t.getToken()
+
+        return None # no idea what to do
+
+
+    def getChild(self, t, i):
+        if t is None:
+            return None
+        return t.getChild(i)
+
+
+    def getChildCount(self, t):
+        if t is None:
+            return 0
+        return t.getChildCount()
+
+
+    def getParent(self, t):
+        return t.getParent()
+
+
+    def setParent(self, t, parent):
+        t.setParent(parent)
+
+
+    def getChildIndex(self, t):
+        return t.getChildIndex()
+
+
+    def setChildIndex(self, t, index):
+        t.setChildIndex(index)
+
+
+    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
+        if parent is not None:
+            parent.replaceChildren(startChildIndex, stopChildIndex, t)
+
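+    # A short usage sketch (token type 5 is an arbitrary stand-in):
+    #
+    #   adaptor = CommonTreeAdaptor()
+    #   t = adaptor.createFromType(5, 'x')   # imaginary node via CommonToken
+    #   copy = adaptor.dupTree(t)            # deep copy through dupNode()
+    #   adaptor.getText(copy)                # -> 'x'
+    #   adaptor.getType(copy)                # -> 5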
+
+############################################################################
+#
+# streams
+#
+# TreeNodeStream
+# \- BaseTree
+#    \- CommonTree
+#
+# TreeAdaptor
+# \- BaseTreeAdaptor
+#    \- CommonTreeAdaptor
+#
+############################################################################
+
+
+
+class TreeNodeStream(IntStream):
+    """@brief A stream of tree nodes
+
+    It accesses nodes from a tree of some kind.
+    """
+    
+    # TreeNodeStream is abstract, no need to complain about not implemented
+    # abstract methods
+    # pylint: disable-msg=W0223
+    
+    def get(self, i):
+        """Get a tree node at an absolute index i; 0..n-1.
+        If you don't want to buffer up nodes, then this method makes no
+        sense for you.
+        """
+
+        raise NotImplementedError
+
+
+    def LT(self, k):
+        """
+        Get tree node at current input pointer + k ahead, where k=1 is the
+        next node.  k<0 indicates nodes in the past.  So LT(-1) is the
+        previous node, but implementations are not required to provide
+        results for k < -1.  LT(0) is undefined.  For k>=n, return null.
+        Return null for LT(0) and any index that results in an absolute
+        address that is negative.
+
+        This is analogous to the LT() method of the TokenStream, but this
+        returns a tree node instead of a token.  Makes code gen identical
+        for both parser and tree grammars. :)
+        """
+
+        raise NotImplementedError
+
+
+    def getTreeSource(self):
+        """
+        Where is this stream pulling nodes from?  This is not the name, but
+        the object that provides node objects.
+        """
+
+        raise NotImplementedError
+    
+
+    def getTokenStream(self):
+        """
+        If the tree associated with this stream was created from a TokenStream,
+        you can specify it here.  Used to do rule $text attribute in tree
+        parser.  Optional unless you use tree parser rule text attribute
+        or output=template and rewrite=true options.
+        """
+
+        raise NotImplementedError
+
+
+    def getTreeAdaptor(self):
+        """
+        What adaptor can tell me how to interpret/navigate nodes and
+        trees.  E.g., get text of a node.
+        """
+
+        raise NotImplementedError
+        
+
+    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
+        """
+        As we flatten the tree, we use UP, DOWN nodes to represent
+        the tree structure.  When debugging we need unique nodes
+        so we have to instantiate new ones.  When doing normal tree
+        parsing, it's slow and a waste of memory to create unique
+        navigation nodes.  Default should be false.
+        """
+
+        raise NotImplementedError
+        
+
+    def toString(self, start, stop):
+        """
+        Return the text of all nodes from start to stop, inclusive.
+        If the stream does not buffer all the nodes then it can still
+        walk recursively from start until stop.  You can always return
+        null or "" too, but users should not access $ruleLabel.text in
+        an action of course in that case.
+        """
+
+        raise NotImplementedError
+
+
+    # REWRITING TREES (used by tree parser)
+    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
+        """
+        Replace from start to stop child index of parent with t, which might
+        be a list.  Number of children may be different
+        after this call.  The stream is notified because it is walking the
+        tree and might need to know you are monkeying with the underlying
+        tree.  Also, it might be able to modify the node stream to avoid
+        restreaming for future phases.
+
+        If parent is null, don't do anything; must be at root of overall tree.
+        Can't replace whatever points to the parent externally.  Do nothing.
+        """
+
+        raise NotImplementedError
+
+
+class CommonTreeNodeStream(TreeNodeStream):
+    """@brief A buffered stream of tree nodes.
+
+    Nodes can be from a tree of ANY kind.
+
+    This node stream sucks all nodes out of the tree specified in
+    the constructor during construction and makes pointers into
+    the tree using an array of Object pointers. The stream necessarily
+    includes pointers to DOWN and UP and EOF nodes.
+
+    This stream knows how to mark/release for backtracking.
+
+    This stream is most suitable for tree interpreters that need to
+    jump around a lot or for tree parsers requiring speed (at cost of memory).
+    There is some duplicated functionality here with UnBufferedTreeNodeStream
+    but just in bookkeeping, not tree walking etc...
+
+    @see UnBufferedTreeNodeStream
+    """
+    
+    def __init__(self, *args):
+        TreeNodeStream.__init__(self)
+
+        if len(args) == 1:
+            adaptor = CommonTreeAdaptor()
+            tree = args[0]
+
+        elif len(args) == 2:
+            adaptor = args[0]
+            tree = args[1]
+
+        else:
+            raise TypeError("Invalid arguments")
+        
+        # all these navigation nodes are shared and hence they
+        # cannot contain any line/column info
+        self.down = adaptor.createFromType(DOWN, "DOWN")
+        self.up = adaptor.createFromType(UP, "UP")
+        self.eof = adaptor.createFromType(EOF, "EOF")
+
+        # The complete mapping from stream index to tree node.
+        # This buffer includes pointers to DOWN, UP, and EOF nodes.
+        # It is built upon ctor invocation.  The elements are type
+        #  Object as we don't know what the trees look like.
+
+        # Load upon first need of the buffer so we can set token types
+        # of interest for reverseIndexing.  Slows us down a wee bit to
+        # do all of the if p==-1 testing everywhere though.
+        self.nodes = []
+
+        # Pull nodes from which tree?
+        self.root = tree
+
+        # IF this tree (root) was created from a token stream, track it.
+        self.tokens = None
+
+        # What tree adaptor was used to build these trees
+        self.adaptor = adaptor
+
+        # Reuse same DOWN, UP navigation nodes unless this is true
+        self.uniqueNavigationNodes = False
+
+        # The index into the nodes list of the current node (next node
+        # to consume).  If -1, nodes array not filled yet.
+        self.p = -1
+
+        # Track the last mark() call result value for use in rewind().
+        self.lastMarker = None
+
+        # Stack of indexes used for push/pop calls
+        self.calls = []
+
+
+    def fillBuffer(self):
+        """Walk tree with depth-first-search and fill nodes buffer.
+        Don't do DOWN, UP nodes if it's a list (t.isNil() is true).
+        """
+
+        self._fillBuffer(self.root)
+        self.p = 0 # buffer of nodes initialized now
+
+
+    def _fillBuffer(self, t):
+        nil = self.adaptor.isNil(t)
+        
+        if not nil:
+            self.nodes.append(t) # add this node
+
+        # add DOWN node if t has children
+        n = self.adaptor.getChildCount(t)
+        if not nil and n > 0:
+            self.addNavigationNode(DOWN)
+
+        # and now add all its children
+        for c in range(n):
+            self._fillBuffer(self.adaptor.getChild(t, c))
+
+        # add UP node if t has children
+        if not nil and n > 0:
+            self.addNavigationNode(UP)
+
+
+    def getNodeIndex(self, node):
+        """What is the stream index for node? 0..n-1
+        Return -1 if node not found.
+        """
+        
+        if self.p == -1:
+            self.fillBuffer()
+
+        for i, t in enumerate(self.nodes):
+            if t == node:
+                return i
+
+        return -1
+
+
+    def addNavigationNode(self, ttype):
+        """
+        As we flatten the tree, we use UP, DOWN nodes to represent
+        the tree structure.  When debugging we need unique nodes
+        so instantiate new ones when uniqueNavigationNodes is true.
+        """
+        
+        navNode = None
+        
+        if ttype == DOWN:
+            if self.hasUniqueNavigationNodes():
+                navNode = self.adaptor.createFromType(DOWN, "DOWN")
+
+            else:
+                navNode = self.down
+
+        else:
+            if self.hasUniqueNavigationNodes():
+                navNode = self.adaptor.createFromType(UP, "UP")
+                
+            else:
+                navNode = self.up
+
+        self.nodes.append(navNode)
+
+
+    def get(self, i):
+        if self.p == -1:
+            self.fillBuffer()
+
+        return self.nodes[i]
+
+
+    def LT(self, k):
+        if self.p == -1:
+            self.fillBuffer()
+
+        if k == 0:
+            return None
+
+        if k < 0:
+            return self.LB(-k)
+
+        #System.out.print("LT(p="+p+","+k+")=");
+        if self.p + k - 1 >= len(self.nodes):
+            return self.eof
+
+        return self.nodes[self.p + k - 1]
+    
+
+    def getCurrentSymbol(self):
+        return self.LT(1)
+
+
+    def LB(self, k):
+        """Look backwards k nodes"""
+        
+        if k == 0:
+            return None
+
+        if self.p - k < 0:
+            return None
+
+        return self.nodes[self.p - k]
+
+
+    def getTreeSource(self):
+        return self.root
+
+
+    def getSourceName(self):
+        return self.getTokenStream().getSourceName()
+
+
+    def getTokenStream(self):
+        return self.tokens
+
+
+    def setTokenStream(self, tokens):
+        self.tokens = tokens
+
+
+    def getTreeAdaptor(self):
+        return self.adaptor
+
+
+    def hasUniqueNavigationNodes(self):
+        return self.uniqueNavigationNodes
+
+
+    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
+        self.uniqueNavigationNodes = uniqueNavigationNodes
+
+
+    def consume(self):
+        if self.p == -1:
+            self.fillBuffer()
+            
+        self.p += 1
+
+        
+    def LA(self, i):
+        return self.adaptor.getType(self.LT(i))
+
+
+    def mark(self):
+        if self.p == -1:
+            self.fillBuffer()
+
+        
+        self.lastMarker = self.index()
+        return self.lastMarker
+
+
+    def release(self, marker=None):
+        # no resources to release
+
+        pass
+
+
+    def index(self):
+        return self.p
+
+
+    def rewind(self, marker=None):
+        if marker is None:
+            marker = self.lastMarker
+            
+        self.seek(marker)
+
+
+    def seek(self, index):
+        if self.p == -1:
+            self.fillBuffer()
+
+        self.p = index
+
+
+    def push(self, index):
+        """
+        Make stream jump to a new location, saving old location.
+        Switch back with pop().
+        """
+
+        self.calls.append(self.p) # save current index
+        self.seek(index)
+
+
+    def pop(self):
+        """
+        Seek back to previous index saved during last push() call.
+        Return top of stack (return index).
+        """
+
+        ret = self.calls.pop(-1)
+        self.seek(ret)
+        return ret
+
+
+    def reset(self):
+        self.p = 0
+        self.lastMarker = 0
+        self.calls = []
+
+        
+    def size(self):
+        if self.p == -1:
+            self.fillBuffer()
+
+        return len(self.nodes)
+
+
+    # TREE REWRITE INTERFACE
+
+    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
+        if parent is not None:
+            self.adaptor.replaceChildren(
+                parent, startChildIndex, stopChildIndex, t
+                )
+
+
+    def __str__(self):
+        """Used for testing, just return the token type stream"""
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        return ' '.join([str(self.adaptor.getType(node))
+                         for node in self.nodes
+                         ])
+
+
+    def toString(self, start, stop):
+        if start is None or stop is None:
+            return None
+
+        if self.p == -1:
+            self.fillBuffer()
+
+        #System.out.println("stop: "+stop);
+        #if ( start instanceof CommonTree )
+        #    System.out.print("toString: "+((CommonTree)start).getToken()+", ");
+        #else
+        #    System.out.println(start);
+        #if ( stop instanceof CommonTree )
+        #    System.out.println(((CommonTree)stop).getToken());
+        #else
+        #    System.out.println(stop);
+            
+        # if we have the token stream, use that to dump text in order
+        if self.tokens is not None:
+            beginTokenIndex = self.adaptor.getTokenStartIndex(start)
+            endTokenIndex = self.adaptor.getTokenStopIndex(stop)
+            
+            # if it's a tree, use start/stop index from start node
+            # else use token range from start/stop nodes
+            if self.adaptor.getType(stop) == UP:
+                endTokenIndex = self.adaptor.getTokenStopIndex(start)
+
+            elif self.adaptor.getType(stop) == EOF:
+                endTokenIndex = self.size() -2 # don't use EOF
+
+            return self.tokens.toString(beginTokenIndex, endTokenIndex)
+
+        # walk nodes looking for start
+        i, t = 0, None
+        for i, t in enumerate(self.nodes):
+            if t == start:
+                break
+
+        # now walk until we see stop, filling string buffer with text
+        buf = []
+        t = self.nodes[i]
+        while t != stop:
+            text = self.adaptor.getText(t)
+            if text is None:
+                text = " " + self.adaptor.getType(t)
+
+            buf.append(text)
+            i += 1
+            t = self.nodes[i]
+
+        # include stop node too
+        text = self.adaptor.getText(stop)
+        if text is None:
+            text = " " +self.adaptor.getType(stop)
+
+        buf.append(text)
+        
+        return ''.join(buf)
+    
+
+    ## iterator interface
+    def __iter__(self):
+        if self.p == -1:
+            self.fillBuffer()
+
+        for node in self.nodes:
+            yield node
+
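+    # Illustrative sketch: flattening ^(A B C) (tokens hypothetical)
+    # produces the node sequence A DOWN B C UP:
+    #
+    #   nodes = CommonTreeNodeStream(tree)
+    #   [str(n) for n in nodes]   # -> ['A', 'DOWN', 'B', 'C', 'UP']
+    #   nodes.LT(1)               # -> the A node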
+
+#############################################################################
+#
+# tree parser
+#
+#############################################################################
+
+class TreeParser(BaseRecognizer):
+    """@brief Baseclass for generated tree parsers.
+    
+    A parser for a stream of tree nodes.  "tree grammars" result in a subclass
+    of this.  All the error reporting and recovery is shared with Parser via
+    the BaseRecognizer superclass.
+    """
+
+    def __init__(self, input, state=None):
+        BaseRecognizer.__init__(self, state)
+
+        self.input = None
+        self.setTreeNodeStream(input)
+
+
+    def reset(self):
+        BaseRecognizer.reset(self) # reset all recognizer state variables
+        if self.input is not None:
+            self.input.seek(0) # rewind the input
+
+
+    def setTreeNodeStream(self, input):
+        """Set the input stream"""
+
+        self.input = input
+
+
+    def getTreeNodeStream(self):
+        return self.input
+
+
+    def getSourceName(self):
+        return self.input.getSourceName()
+
+
+    def getCurrentInputSymbol(self, input):
+        return input.LT(1)
+
+
+    def getMissingSymbol(self, input, e, expectedTokenType, follow):
+        tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"
+        return CommonTree(CommonToken(type=expectedTokenType, text=tokenText))
+
+
+    def matchAny(self, ignore): # ignore stream, copy of this.input
+        """
+        Match '.' in tree parser has special meaning.  Skip node or
+        entire tree if node has children.  If children, scan until
+        corresponding UP node.
+        """
+        
+        self._state.errorRecovery = False
+
+        look = self.input.LT(1)
+        if self.input.getTreeAdaptor().getChildCount(look) == 0:
+            self.input.consume() # not subtree, consume 1 node and return
+            return
+
+        # current node is a subtree, skip to corresponding UP.
+        # must count nesting level to get right UP
+        level = 0
+        tokenType = self.input.getTreeAdaptor().getType(look)
+        while tokenType != EOF and not (tokenType == UP and level==0):
+            self.input.consume()
+            look = self.input.LT(1)
+            tokenType = self.input.getTreeAdaptor().getType(look)
+            if tokenType == DOWN:
+                level += 1
+
+            elif tokenType == UP:
+                level -= 1
+
+        self.input.consume() # consume UP
+
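+    # For example, with the buffer for ^(A ^(B C)) flattened as
+    # A DOWN B DOWN C UP UP, matchAny at A consumes the whole subtree,
+    # using the DOWN/UP nesting level to find the matching UP.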
+
+    def mismatch(self, input, ttype, follow):
+        """
+        We have DOWN/UP nodes in the stream that have no line info; override.
+        plus we want to alter the exception type. Don't try to recover
+        from tree parser errors inline...
+        """
+
+        raise MismatchedTreeNodeException(ttype, input)
+
+
+    def getErrorHeader(self, e):
+        """
+        Prefix error message with the grammar name because message is
+        always intended for the programmer because the parser built
+        the input tree not the user.
+        """
+
+        return (self.getGrammarFileName() +
+                ": node from %sline %s:%s"
+                % (['', "after "][e.approximateLineInfo],
+                   e.line,
+                   e.charPositionInLine
+                   )
+                )
+
+    def getErrorMessage(self, e, tokenNames):
+        """
+        Tree parsers parse nodes; they usually have a token object as
+        payload. Set the exception token and do the default behavior.
+        """
+
+        if isinstance(self, TreeParser):
+            adaptor = e.input.getTreeAdaptor()
+            e.token = adaptor.getToken(e.node)
+            if e.token is not None: # could be an UP/DOWN node
+                e.token = CommonToken(
+                    type=adaptor.getType(e.node),
+                    text=adaptor.getText(e.node)
+                    )
+
+        return BaseRecognizer.getErrorMessage(self, e, tokenNames)
+
+
+    def traceIn(self, ruleName, ruleIndex):
+        BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))
+
+
+    def traceOut(self, ruleName, ruleIndex):
+        BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
+
+
+#############################################################################
+#
+# streams for rule rewriting
+#
+#############################################################################
+
+class RewriteRuleElementStream(object):
+    """@brief Internal helper class.
+    
+    A generic list of elements tracked in an alternative to be used in
+    a -> rewrite rule.  We need to subclass to fill in the next() method,
+    which returns either an AST node wrapped around a token payload or
+    an existing subtree.
+
+    Once you start next()ing, do not try to add more elements.  It will
+    break the cursor tracking I believe.
+
+    @see org.antlr.runtime.tree.RewriteRuleSubtreeStream
+    @see org.antlr.runtime.tree.RewriteRuleTokenStream
+    
+    TODO: add mechanism to detect/puke on modification after reading from
+    stream
+    """
+
+    def __init__(self, adaptor, elementDescription, elements=None):
+        # Cursor 0..n-1.  If singleElement!=null, cursor is 0 until you next(),
+        # which bumps it to 1 meaning no more elements.
+        self.cursor = 0
+
+        # Track single elements w/o creating a list.  Upon 2nd add, alloc list
+        self.singleElement = None
+
+        # The list of tokens or subtrees we are tracking
+        self.elements = None
+
+        # Once a node / subtree has been used in a stream, it must be dup'd
+        # from then on.  Streams are reset after subrules so that the streams
+        # can be reused in future subrules.  So, reset must set a dirty bit.
+        # If dirty, then next() always returns a dup.
+        self.dirty = False
+        
+        # The element or stream description; usually has name of the token or
+        # rule reference that this list tracks.  Can include rulename too, but
+        # the exception would track that info.
+        self.elementDescription = elementDescription
+
+        self.adaptor = adaptor
+
+        if isinstance(elements, (list, tuple)):
+            # Create a stream, but feed off an existing list
+            self.singleElement = None
+            self.elements = elements
+
+        else:
+            # Create a stream with one element
+            self.add(elements)
+
+
+    def reset(self):
+        """
+        Reset the condition of this stream so that it appears we have
+        not consumed any of its elements.  Elements themselves are untouched.
+        Once we reset the stream, any future use will need duplicates.  Set
+        the dirty bit.
+        """
+        
+        self.cursor = 0
+        self.dirty = True
+
+        
+    def add(self, el):
+        if el is None:
+            return
+
+        if self.elements is not None: # if in list, just add
+            self.elements.append(el)
+            return
+
+        if self.singleElement is None: # no elements yet, track w/o list
+            self.singleElement = el
+            return
+
+        # adding 2nd element, move to list
+        self.elements = []
+        self.elements.append(self.singleElement)
+        self.singleElement = None
+        self.elements.append(el)
+
+
+    def nextTree(self):
+        """
+        Return the next element in the stream.  If out of elements, throw
+        an exception unless size()==1.  If size is 1, then return elements[0].
+        
+        Return a duplicate node/subtree if stream is out of elements and
+        size==1. If we've already used the element, dup (dirty bit set).
+        """
+        
+        if (self.dirty
+            or (self.cursor >= len(self) and len(self) == 1)
+            ):
+            # if out of elements and size is 1, dup
+            el = self._next()
+            return self.dup(el)
+
+        # test size above then fetch
+        el = self._next()
+        return el
+
+
+    def _next(self):
+        """
+        do the work of getting the next element, making sure that it's
+        a tree node or subtree.  Deal with the optimization of single-
+        element list versus list of size > 1.  Throw an exception
+        if the stream is empty or we're out of elements and size>1.
+        protected so you can override in a subclass if necessary.
+        """
+
+        if len(self) == 0:
+            raise RewriteEmptyStreamException(self.elementDescription)
+            
+        if self.cursor >= len(self): # out of elements?
+            if len(self) == 1: # if size is 1, it's ok; return and we'll dup 
+                return self.toTree(self.singleElement)
+
+            # out of elements and size was not 1, so we can't dup
+            raise RewriteCardinalityException(self.elementDescription)
+
+        # we have elements
+        if self.singleElement is not None:
+            self.cursor += 1 # move cursor even for single element list
+            return self.toTree(self.singleElement)
+
+        # must have more than one in list, pull from elements
+        o = self.toTree(self.elements[self.cursor])
+        self.cursor += 1
+        return o
+
+
+    def dup(self, el):
+        """
+        When constructing trees, sometimes we need to dup a token or AST
+        subtree.  Dup'ing a token means just creating another AST node
+        around it.  For trees, you must call the adaptor.dupTree() unless
+        the element is for a tree root; then it must be a node dup.
+        """
+
+        raise NotImplementedError
+    
+
+    def toTree(self, el):
+        """
+        Ensure stream emits trees; tokens must be converted to AST nodes.
+        AST nodes can be passed through unmolested.
+        """
+
+        return el
+
+
+    def hasNext(self):
+        return ( (self.singleElement is not None and self.cursor < 1)
+                 or (self.elements is not None
+                     and self.cursor < len(self.elements)
+                     )
+                 )
+
+                 
+    def size(self):
+        if self.singleElement is not None:
+            return 1
+
+        if self.elements is not None:
+            return len(self.elements)
+
+        return 0
+
+    __len__ = size
+    
+
+    def getDescription(self):
+        """Deprecated. Directly access elementDescription attribute"""
+        
+        return self.elementDescription
+
+
+class RewriteRuleTokenStream(RewriteRuleElementStream):
+    """@brief Internal helper class."""
+
+    def toTree(self, el):
+        # Don't convert to a tree unless they explicitly call nextTree.
+        # This way we can do hetero tree nodes in rewrite.
+        return el
+
+
+    def nextNode(self):
+        t = self._next()
+        return self.adaptor.createWithPayload(t)
+
+    
+    def nextToken(self):
+        return self._next()
+
+    
+    def dup(self, el):
+        raise TypeError("dup can't be called for a token stream.")
+
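+    # Illustrative sketch (idToken is a hypothetical Token, adaptor some
+    # TreeAdaptor built elsewhere):
+    #
+    #   stream = RewriteRuleTokenStream(adaptor, "token ID", idToken)
+    #   stream.hasNext()     # -> True
+    #   stream.nextToken()   # -> idToken
+    #   stream.hasNext()     # -> False, the single element is consumed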
+
+class RewriteRuleSubtreeStream(RewriteRuleElementStream):
+    """@brief Internal helper class."""
+
+    def nextNode(self):
+        """
+        Treat next element as a single node even if it's a subtree.
+        This is used instead of next() when the result has to be a
+        tree root node.  Also prevents us from duplicating recently-added
+        children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
+        must dup the type node, but ID has been added.
+
+        Referencing a rule result twice is ok; dup entire tree as
+        we can't be adding trees as root; e.g., expr expr.
+
+        Hideous code duplication here with super.next().  Can't think of
+        a proper way to refactor.  This needs to always call dup node
+        and super.next() doesn't know which to call: dup node or dup tree.
+        """
+        
+        if (self.dirty
+            or (self.cursor >= len(self) and len(self) == 1)
+            ):
+            # if out of elements and size is 1, dup (at most a single node
+            # since this is for making root nodes).
+            el = self._next()
+            return self.adaptor.dupNode(el)
+
+        # test size above then fetch
+        el = self._next()
+        return el
+
+
+    def dup(self, el):
+        return self.adaptor.dupTree(el)
+
+
+
+class RewriteRuleNodeStream(RewriteRuleElementStream):
+    """
+    Queues up nodes matched on left side of -> in a tree parser. This is
+    the analog of RewriteRuleTokenStream for normal parsers. 
+    """
+    
+    def nextNode(self):
+        return self._next()
+
+
+    def toTree(self, el):
+        return self.adaptor.dupNode(el)
+
+
+    def dup(self, el):
+        # we dup every node, so we don't have to worry about calling dup;
+        # next() is short-circuited so it doesn't call dup.
+        raise TypeError("dup can't be called for a node stream.")
+
+
+class TreeRuleReturnScope(RuleReturnScope):
+    """
+    This is identical to the ParserRuleReturnScope except that
+    the start property is a tree node, not a Token object,
+    when you are parsing trees.  To be generic the tree node types
+    have to be Object.
+    """
+
+    def __init__(self):
+        self.start = None
+        self.tree = None
+        
+    
+    def getStart(self):
+        return self.start
+
+    
+    def getTree(self):
+        return self.tree
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/treewizard.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,612 @@
+""" @package antlr3.tree
+@brief ANTLR3 runtime package, treewizard module
+
+A utility module to create ASTs at runtime.
+See <http://www.antlr.org/wiki/display/~admin/2007/07/02/Exploring+Concept+of+TreeWizard>
+for an overview. Note that the API of the Python implementation is slightly
+different.
+
+"""
+
+# begin[licence]
+#
+# [The "BSD licence"]
+# Copyright (c) 2005-2008 Terence Parr
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. The name of the author may not be used to endorse or promote products
+#    derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# end[licence]
+
+from antlr3.constants import INVALID_TOKEN_TYPE
+from antlr3.tokens import CommonToken
+from antlr3.tree import CommonTree, CommonTreeAdaptor
+
+
+def computeTokenTypes(tokenNames):
+    """
+    Compute a dict that is an inverted index of
+    tokenNames (which maps int token types to names).
+    """
+
+    if tokenNames is None:
+        return {}
+
+    return dict((name, type) for type, name in enumerate(tokenNames))
+
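+# For example, computeTokenTypes(['<invalid>', '<EOR>', 'ID']) returns
+# {'<invalid>': 0, '<EOR>': 1, 'ID': 2} (token names hypothetical).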
+
+## token types for pattern parser
+EOF = -1
+BEGIN = 1
+END = 2
+ID = 3
+ARG = 4
+PERCENT = 5
+COLON = 6
+DOT = 7
+
+class TreePatternLexer(object):
+    def __init__(self, pattern):
+        ## The tree pattern to lex like "(A B C)"
+        self.pattern = pattern
+
+        ## Index into input string
+        self.p = -1
+
+        ## Current char
+        self.c = None
+
+        ## How long is the pattern in chars?
+        self.n = len(pattern)
+
+        ## Set when token type is ID or ARG
+        self.sval = None
+
+        self.error = False
+
+        self.consume()
+
+
+    __idStartChar = frozenset(
+        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
+        )
+    __idChar = __idStartChar | frozenset('0123456789')
+    
+    def nextToken(self):
+        self.sval = ""
+        while self.c != EOF:
+            if self.c in (' ', '\n', '\r', '\t'):
+                self.consume()
+                continue
+
+            if self.c in self.__idStartChar:
+                self.sval += self.c
+                self.consume()
+                while self.c in self.__idChar:
+                    self.sval += self.c
+                    self.consume()
+
+                return ID
+
+            if self.c == '(':
+                self.consume()
+                return BEGIN
+
+            if self.c == ')':
+                self.consume()
+                return END
+
+            if self.c == '%':
+                self.consume()
+                return PERCENT
+
+            if self.c == ':':
+                self.consume()
+                return COLON
+
+            if self.c == '.':
+                self.consume()
+                return DOT
+
+            if self.c == '[': # grab [x] as a string, returning x
+                self.consume()
+                while self.c != ']':
+                    if self.c == '\\':
+                        self.consume()
+                        if self.c != ']':
+                            self.sval += '\\'
+
+                        self.sval += self.c
+
+                    else:
+                        self.sval += self.c
+
+                    self.consume()
+
+                self.consume()
+                return ARG
+
+            self.consume()
+            self.error = True
+            return EOF
+
+        return EOF
+
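+    # Illustrative token sequence for the pattern '(A B[foo])':
+    #
+    #   lexer = TreePatternLexer('(A B[foo])')
+    #   lexer.nextToken()   # -> BEGIN
+    #   lexer.nextToken()   # -> ID, lexer.sval == 'A'
+    #   lexer.nextToken()   # -> ID, lexer.sval == 'B'
+    #   lexer.nextToken()   # -> ARG, lexer.sval == 'foo'
+    #   lexer.nextToken()   # -> END
+    #   lexer.nextToken()   # -> EOF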
+
+    def consume(self):
+        self.p += 1
+        if self.p >= self.n:
+            self.c = EOF
+
+        else:
+            self.c = self.pattern[self.p]
+
+
+class TreePatternParser(object):
+    def __init__(self, tokenizer, wizard, adaptor):
+        self.tokenizer = tokenizer
+        self.wizard = wizard
+        self.adaptor = adaptor
+        self.ttype = tokenizer.nextToken() # kickstart
+
+
+    def pattern(self):
+        if self.ttype == BEGIN:
+            return self.parseTree()
+
+        elif self.ttype == ID:
+            node = self.parseNode()
+            if self.ttype == EOF:
+                return node
+
+            return None # extra junk on end
+
+        return None
+
+
+    def parseTree(self):
+        if self.ttype != BEGIN:
+            return None
+
+        self.ttype = self.tokenizer.nextToken()
+        root = self.parseNode()
+        if root is None:
+            return None
+
+        while self.ttype in (BEGIN, ID, PERCENT, DOT):
+            if self.ttype == BEGIN:
+                subtree = self.parseTree()
+                self.adaptor.addChild(root, subtree)
+
+            else:
+                child = self.parseNode()
+                if child is None:
+                    return None
+
+                self.adaptor.addChild(root, child)
+
+        if self.ttype != END:
+            return None
+
+        self.ttype = self.tokenizer.nextToken()
+        return root
+
+
+    def parseNode(self):
+        # "%label:" prefix
+        label = None
+        
+        if self.ttype == PERCENT:
+            self.ttype = self.tokenizer.nextToken()
+            if self.ttype != ID:
+                return None
+
+            label = self.tokenizer.sval
+            self.ttype = self.tokenizer.nextToken()
+            if self.ttype != COLON:
+                return None
+            
+            self.ttype = self.tokenizer.nextToken() # move to ID following colon
+
+        # Wildcard?
+        if self.ttype == DOT:
+            self.ttype = self.tokenizer.nextToken()
+            wildcardPayload = CommonToken(0, ".")
+            node = WildcardTreePattern(wildcardPayload)
+            if label is not None:
+                node.label = label
+            return node
+
+        # "ID" or "ID[arg]"
+        if self.ttype != ID:
+            return None
+
+        tokenName = self.tokenizer.sval
+        self.ttype = self.tokenizer.nextToken()
+        
+        if tokenName == "nil":
+            return self.adaptor.nil()
+
+        text = tokenName
+        # check for arg
+        arg = None
+        if self.ttype == ARG:
+            arg = self.tokenizer.sval
+            text = arg
+            self.ttype = self.tokenizer.nextToken()
+
+        # create node
+        treeNodeType = self.wizard.getTokenType(tokenName)
+        if treeNodeType == INVALID_TOKEN_TYPE:
+            return None
+
+        node = self.adaptor.createFromType(treeNodeType, text)
+        if label is not None and isinstance(node, TreePattern):
+            node.label = label
+
+        if arg is not None and isinstance(node, TreePattern):
+            node.hasTextArg = True
+
+        return node
+
+
+class TreePattern(CommonTree):
+    """
+    When using %label:TOKENNAME in a tree for parse(), we must
+    track the label.
+    """
+
+    def __init__(self, payload):
+        CommonTree.__init__(self, payload)
+
+        self.label = None
+        self.hasTextArg = False
+        
+
+    def toString(self):
+        if self.label is not None:
+            return '%' + self.label + ':' + CommonTree.toString(self)
+        
+        else:
+            return CommonTree.toString(self)
+
+
+class WildcardTreePattern(TreePattern):
+    pass
+
+
+class TreePatternTreeAdaptor(CommonTreeAdaptor):
+    """This adaptor creates TreePattern objects for use during scan()"""
+
+    def createWithPayload(self, payload):
+        return TreePattern(payload)
+
+
+class TreeWizard(object):
+    """
+    Build and navigate trees with this object.  It must know the names of
+    tokens, so you have to pass in a map or list of token names (from which
+    this class can build the map).  E.g., token DECL means nothing unless the
+    class can translate it to a token type.
+
+    In order to create nodes and navigate, this class needs a TreeAdaptor.
+
+    This class can build a token type -> node index for repeated use or for
+    iterating over the various nodes with a particular type.
+
+    This class works in conjunction with the TreeAdaptor rather than moving
+    all this functionality into the adaptor.  An adaptor helps build and
+    navigate trees using methods.  This class helps you do it with string
+    patterns like "(A B C)".  You can create a tree from that pattern or
+    match subtrees against it.
+    """
+
+    def __init__(self, adaptor=None, tokenNames=None, typeMap=None):
+        self.adaptor = adaptor
+        if typeMap is None:
+            self.tokenNameToTypeMap = computeTokenTypes(tokenNames)
+
+        else:
+            if tokenNames is not None:
+                raise ValueError("Can't have both tokenNames and typeMap")
+
+            self.tokenNameToTypeMap = typeMap
+
+
+    def getTokenType(self, tokenName):
+        """Using the map of token names to token types, return the type."""
+
+        try:
+            return self.tokenNameToTypeMap[tokenName]
+        except KeyError:
+            return INVALID_TOKEN_TYPE
+
+
+    def create(self, pattern):
+        """
+        Create a tree or node from the indicated tree pattern that closely
+        follows ANTLR tree grammar tree element syntax:
+        
+        (root child1 ... childN).
+        
+        You can also just pass in a node: ID
+         
+        Any node can have a text argument: ID[foo]
+        (notice there are no quotes around foo--it's clear it's a string).
+        
+        nil is a special name meaning "give me a nil node".  Useful for
+        making lists: (nil A B C) is a list of A B C.
+        """
+        
+        tokenizer = TreePatternLexer(pattern)
+        parser = TreePatternParser(tokenizer, self, self.adaptor)
+        return parser.pattern()
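+    # Illustrative create() forms (assuming the wizard sketched above):
+    #   wiz.create("ID")             # a single ID node
+    #   wiz.create("ID[foo]")        # an ID node whose text is "foo"
+    #   wiz.create("(nil ID INT)")   # a flat list under a nil root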
+
+
+    def index(self, tree):
+        """Walk the entire tree and make a node name to nodes mapping.
+        
+        For now, use recursion but later nonrecursive version may be
+        more efficient.  Returns a dict int -> list where the list is
+        of your AST node type.  The int is the token type of the node.
+        """
+
+        m = {}
+        self._index(tree, m)
+        return m
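+    # Sketch (types 4, 5 and 6 are the assumed positions of ASSIGN, ID and
+    # INT in the token-name list above):
+    #   t = wiz.create("(ASSIGN ID INT)")
+    #   wiz.index(t)   # -> {4: [ASSIGN node], 5: [ID node], 6: [INT node]}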
+
+
+    def _index(self, t, m):
+        """Do the work for index"""
+
+        if t is None:
+            return
+
+        ttype = self.adaptor.getType(t)
+        elements = m.get(ttype)
+        if elements is None:
+            m[ttype] = elements = []
+
+        elements.append(t)
+        for i in range(self.adaptor.getChildCount(t)):
+            child = self.adaptor.getChild(t, i)
+            self._index(child, m)
+
+
+    def find(self, tree, what):
+        """Return a list of matching token.
+
+        what may either be an integer specifzing the token type to find or
+        a string with a pattern that must be matched.
+        
+        """
+        
+        if isinstance(what, (int, long)):
+            return self._findTokenType(tree, what)
+
+        elif isinstance(what, basestring):
+            return self._findPattern(tree, what)
+
+        else:
+            raise TypeError("'what' must be string or integer")
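+    # Sketch of both call forms (type 5 is the assumed ID token type):
+    #   wiz.find(t, 5)                  # all nodes whose token type is 5
+    #   wiz.find(t, "(ASSIGN ID INT)")  # all subtrees matching the pattern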
+
+
+    def _findTokenType(self, t, ttype):
+        """Return a List of tree nodes with token type ttype"""
+
+        nodes = []
+
+        def visitor(tree, parent, childIndex, labels):
+            nodes.append(tree)
+
+        self.visit(t, ttype, visitor)
+
+        return nodes
+
+
+    def _findPattern(self, t, pattern):
+        """Return a List of subtrees matching pattern."""
+        
+        subtrees = []
+        
+        # Create a TreePattern from the pattern
+        tokenizer = TreePatternLexer(pattern)
+        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
+        tpattern = parser.pattern()
+        
+        # don't allow invalid patterns
+        if (tpattern is None or tpattern.isNil()
+            or isinstance(tpattern, WildcardTreePattern)):
+            return None
+
+        rootTokenType = tpattern.getType()
+
+        def visitor(tree, parent, childIndex, label):
+            if self._parse(tree, tpattern, None):
+                subtrees.append(tree)
+                
+        self.visit(t, rootTokenType, visitor)
+
+        return subtrees
+
+
+    def visit(self, tree, what, visitor):
+        """Visit every node in tree matching what, invoking the visitor.
+
+        If what is a string, it is parsed as a pattern and only matching
+        subtrees will be visited.
+        The implementation uses the root node of the pattern in combination
+        with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
+        Patterns with wildcard roots are also not allowed.
+
+        If what is an integer, it is used as a token type and visit will match
+        all nodes of that type (this is faster than the pattern match).
+        The labels arg of the visitor action method is never set (it's None)
+        since using a token type rather than a pattern doesn't let us set a
+        label.
+        """
+
+        if isinstance(what, (int, long)):
+            self._visitType(tree, None, 0, what, visitor)
+
+        elif isinstance(what, basestring):
+            self._visitPattern(tree, what, visitor)
+
+        else:
+            raise TypeError("'what' must be string or integer")
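+    # Sketch of a visitor callback (the signature is fixed by _visitType and
+    # _visitPattern; labels is None for token-type visits):
+    #   def visitor(node, parent, childIndex, labels):
+    #       print node.toString()
+    #   wiz.visit(t, "(ASSIGN %lhs:ID %rhs:.)", visitor)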
+        
+              
+    def _visitType(self, t, parent, childIndex, ttype, visitor):
+        """Do the recursive work for visit"""
+        
+        if t is None:
+            return
+
+        if self.adaptor.getType(t) == ttype:
+            visitor(t, parent, childIndex, None)
+
+        for i in range(self.adaptor.getChildCount(t)):
+            child = self.adaptor.getChild(t, i)
+            self._visitType(child, t, i, ttype, visitor)
+
+
+    def _visitPattern(self, tree, pattern, visitor):
+        """
+        For all subtrees that match the pattern, execute the visit action.
+        """
+
+        # Create a TreePattern from the pattern
+        tokenizer = TreePatternLexer(pattern)
+        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
+        tpattern = parser.pattern()
+        
+        # don't allow invalid patterns
+        if (tpattern is None or tpattern.isNil()
+            or isinstance(tpattern, WildcardTreePattern)):
+            return
+
+        rootTokenType = tpattern.getType()
+
+        def rootvisitor(tree, parent, childIndex, labels):
+            labels = {}
+            if self._parse(tree, tpattern, labels):
+                visitor(tree, parent, childIndex, labels)
+                
+        self.visit(tree, rootTokenType, rootvisitor)
+        
+
+    def parse(self, t, pattern, labels=None):
+        """
+        Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
+        on the various nodes and '.' (dot) as the node/subtree wildcard,
+        return True if the pattern matches and fill the labels dict with
+        the labels pointing at the appropriate nodes.  Return False if
+        the pattern is malformed or the tree does not match.
+
+        If a node specifies a text arg in the pattern, then that must match
+        for that node in t.
+        """
+
+        tokenizer = TreePatternLexer(pattern)
+        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
+        tpattern = parser.pattern()
+
+        return self._parse(t, tpattern, labels)
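+    # Sketch: collecting labelled nodes (the pattern is illustrative):
+    #   labels = {}
+    #   if wiz.parse(t, "(ASSIGN %lhs:ID %rhs:.)", labels):
+    #       lhs = labels['lhs']   # the ID node
+    #       rhs = labels['rhs']   # whatever subtree the wildcard matched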
+
+
+    def _parse(self, t1, t2, labels):
+        """
+        Do the work for parse. Check to see if the t2 pattern fits the
+        structure and token types in t1.  Check text if the pattern has
+        text arguments on nodes.  Fill labels map with pointers to nodes
+        in tree matched against nodes in pattern with labels.
+	"""
+        
+        # make sure both are non-null
+        if t1 is None or t2 is None:
+            return False
+
+        # check roots (wildcard matches anything)
+        if not isinstance(t2, WildcardTreePattern):
+            if self.adaptor.getType(t1) != t2.getType():
+                return False
+
+            if t2.hasTextArg and self.adaptor.getText(t1) != t2.getText():
+                return False
+
+        if t2.label is not None and labels is not None:
+            # map label in pattern to node in t1
+            labels[t2.label] = t1
+
+        # check children
+        n1 = self.adaptor.getChildCount(t1)
+        n2 = t2.getChildCount()
+        if n1 != n2:
+            return False
+
+        for i in range(n1):
+            child1 = self.adaptor.getChild(t1, i)
+            child2 = t2.getChild(i)
+            if not self._parse(child1, child2, labels):
+                return False
+
+        return True
+
+
+    def equals(self, t1, t2, adaptor=None):
+        """
+        Compare t1 and t2; return True if token types, text, and structure
+        match exactly.
+        The trees are examined in their entirety, so that (A B) matches
+        neither (A B C) nor (A (B C)).
+        """
+
+        if adaptor is None:
+            adaptor = self.adaptor
+
+        return self._equals(t1, t2, adaptor)
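+    # Sketch: equality is exact, per the docstring above:
+    #   wiz.equals(wiz.create("(ASSIGN ID INT)"),
+    #              wiz.create("(ASSIGN ID INT)"))   # -> True
+    #   wiz.equals(wiz.create("(ASSIGN ID)"),
+    #              wiz.create("(ASSIGN ID INT)"))   # -> False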
+
+
+    def _equals(self, t1, t2, adaptor):
+        # make sure both are non-null
+        if t1 is None or t2 is None:
+            return False
+
+        # check roots
+        if adaptor.getType(t1) != adaptor.getType(t2):
+            return False
+
+        if adaptor.getText(t1) != adaptor.getText(t2):
+            return False
+        
+        # check children
+        n1 = adaptor.getChildCount(t1)
+        n2 = adaptor.getChildCount(t2)
+        if n1 != n2:
+            return False
+
+        for i in range(n1):
+            child1 = adaptor.getChild(t1, i)
+            child2 = adaptor.getChild(t2, i)
+            if not self._equals(child1, child2, adaptor):
+                return False
+
+        return True
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/PKG-INFO	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,13 @@
+Metadata-Version: 1.0
+Name: antlr-python-runtime
+Version: 3.1
+Summary: Runtime package for ANTLR3
+Home-page: http://www.antlr.org/
+Author: Benjamin Niemann
+Author-email: pink@odahoda.de
+License: BSD
+Download-URL: http://www.antlr.org/download.html
+Description: This is the runtime package for ANTLR3, which is required to use parsers
+        generated by ANTLR3.
+        
+Platform: UNKNOWN
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/SOURCES.txt	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,23 @@
+AUTHORS
+LICENSE
+MANIFEST.in
+README
+ez_setup.py
+setup.py
+antlr3/__init__.py
+antlr3/compat.py
+antlr3/constants.py
+antlr3/dfa.py
+antlr3/dottreegen.py
+antlr3/exceptions.py
+antlr3/extras.py
+antlr3/main.py
+antlr3/recognizers.py
+antlr3/streams.py
+antlr3/tokens.py
+antlr3/tree.py
+antlr3/treewizard.py
+antlr_python_runtime.egg-info/PKG-INFO
+antlr_python_runtime.egg-info/SOURCES.txt
+antlr_python_runtime.egg-info/dependency_links.txt
+antlr_python_runtime.egg-info/top_level.txt
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/dependency_links.txt	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,1 @@
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/top_level.txt	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,1 @@
+antlr3
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/setup.py	Tue Jan 20 13:19:45 2009 +0000
@@ -0,0 +1,289 @@
+# bootstrapping setuptools
+import ez_setup
+ez_setup.use_setuptools()
+
+import os
+import sys
+import textwrap
+from distutils.errors import DistutilsError, DistutilsFileError
+from distutils.command.clean import clean as _clean
+from distutils.cmd import Command
+from setuptools import setup
+from distutils import log
+
+
+class clean(_clean):
+    """Also cleanup local temp files."""
+
+    def run(self):
+        _clean.run(self)
+
+        import fnmatch
+        
+        # kill temporary files
+        patterns = [
+            # generic tempfiles
+            '*~', '*.bak', '*.pyc',
+
+            # tempfiles generated by ANTLR runs
+            't[0-9]*Lexer.py', 't[0-9]*Parser.py',
+            '*.tokens', '*__.g',
+            ]
+            
+        for path in ('antlr3', 'unittests', 'tests'):
+            path = os.path.join(os.path.dirname(__file__), path)
+            if os.path.isdir(path):
+                for root, dirs, files in os.walk(path, topdown=True):
+                    graveyard = []                    
+                    for pat in patterns:
+                        graveyard.extend(fnmatch.filter(files, pat))
+
+                    for name in graveyard:
+                        filePath = os.path.join(root, name)
+
+                        try:
+                            log.info("removing '%s'", filePath)
+                            os.unlink(filePath)
+                        except OSError, exc:
+                            log.warn(
+                                "Failed to delete '%s': %s",
+                                filePath, exc
+                                )
+
+            
+class TestError(DistutilsError):
+    pass
+
+
+# Grumble... the command's class name appears verbatim in the --help output:
+# ...
+# Options for 'CmdUnitTest' command
+# ...
+# so a rather ugly lowercase name has to be used.
+class unittest(Command):
+    """Run unit tests for package"""
+
+    description = "run unit tests for package"
+
+    user_options = [
+        ]
+    boolean_options = []
+
+    def initialize_options(self):
+        pass
+    
+    def finalize_options(self):
+        pass
+    
+    def run(self):
+        testDir = os.path.join(os.path.dirname(__file__), 'unittests')
+        if not os.path.isdir(testDir):
+            raise DistutilsFileError(
+                "There is not 'unittests' directory. Did you fetch the "
+                "development version?",
+                )
+
+        import glob
+        import imp
+        import unittest
+        import traceback
+        import StringIO
+        
+        suite = unittest.TestSuite()
+        loadFailures = []
+        
+        # collect tests from all unittests/test*.py files
+        testFiles = []
+        for testPath in glob.glob(os.path.join(testDir, 'test*.py')):
+            testFiles.append(testPath)
+
+        testFiles.sort()
+        for testPath in testFiles:
+            testID = os.path.basename(testPath)[:-3]
+
+            try:
+                modFile, modPathname, modDescription \
+                         = imp.find_module(testID, [testDir])
+
+                testMod = imp.load_module(
+                    testID, modFile, modPathname, modDescription
+                    )
+                
+                suite.addTests(
+                    unittest.defaultTestLoader.loadTestsFromModule(testMod)
+                    )
+                
+            except Exception:
+                buf = StringIO.StringIO()
+                traceback.print_exc(file=buf)
+                
+                loadFailures.append(
+                    (os.path.basename(testPath), buf.getvalue())
+                    )              
+                
+            
+        runner = unittest.TextTestRunner(verbosity=2)
+        result = runner.run(suite)
+
+        for testName, error in loadFailures:
+            sys.stderr.write('\n' + '='*70 + '\n')
+            sys.stderr.write(
+                "Failed to load test module %s\n" % testName
+                )
+            sys.stderr.write(error)
+            sys.stderr.write('\n')
+            
+        if not result.wasSuccessful() or loadFailures:
+            raise TestError(
+                "Unit test suite failed!",
+                )
+            
+
+class functest(Command):
+    """Run functional tests for package"""
+
+    description = "run functional tests for package"
+
+    user_options = [
+        ('testcase=', None,
+         "testcase to run [default: run all]"),
+        ('antlr-version=', None,
+         "ANTLR version to use [default: HEAD (in ../../build)]"),
+        ]
+    
+    boolean_options = []
+
+    def initialize_options(self):
+        self.testcase = None
+        self.antlr_version = 'HEAD'
+
+    
+    def finalize_options(self):
+        pass
+
+    
+    def run(self):
+        import glob
+        import imp
+        import unittest
+        import traceback
+        import StringIO
+        
+        testDir = os.path.join(os.path.dirname(__file__), 'tests')
+        if not os.path.isdir(testDir):
+            raise DistutilsFileError(
+                "There is not 'tests' directory. Did you fetch the "
+                "development version?",
+                )
+
+        # make sure relative imports from test cases work
+        sys.path.insert(0, testDir)
+
+        rootDir = os.path.abspath(
+            os.path.join(os.path.dirname(__file__), '..', '..'))
+
+        if self.antlr_version == 'HEAD':
+            classpath = [
+                os.path.join(rootDir, 'build', 'classes'),
+                os.path.join(rootDir, 'build', 'rtclasses')
+                ]
+        else:
+            classpath = [
+                os.path.join(rootDir, 'archive',
+                             'antlr-%s.jar' % self.antlr_version)
+                ]
+
+        classpath.extend([
+            os.path.join(rootDir, 'lib', 'antlr-2.7.7.jar'),
+            os.path.join(rootDir, 'lib', 'stringtemplate-3.2.jar'),
+            os.path.join(rootDir, 'lib', 'junit-4.2.jar')
+            ])
+        os.environ['CLASSPATH'] = ':'.join(classpath)
+
+        os.environ['ANTLRVERSION'] = self.antlr_version
+
+        suite = unittest.TestSuite()
+        loadFailures = []
+        
+        # collect tests from all tests/t*.py files
+        testFiles = []
+        for testPath in glob.glob(os.path.join(testDir, 't*.py')):
+            if (testPath.endswith('Lexer.py')
+                or testPath.endswith('Parser.py')
+                ):
+                continue
+
+            # if a single testcase has been selected, filter out all other
+            # tests
+            if (self.testcase is not None
+                and os.path.basename(testPath)[:-3] != self.testcase
+                ):
+                continue
+            
+            testFiles.append(testPath)
+
+        testFiles.sort()
+        for testPath in testFiles:
+            testID = os.path.basename(testPath)[:-3]
+
+            try:
+                modFile, modPathname, modDescription \
+                         = imp.find_module(testID, [testDir])
+
+                testMod = imp.load_module(
+                    testID, modFile, modPathname, modDescription
+                    )
+                
+                suite.addTests(
+                    unittest.defaultTestLoader.loadTestsFromModule(testMod)
+                    )
+                
+            except Exception:
+                buf = StringIO.StringIO()
+                traceback.print_exc(file=buf)
+                
+                loadFailures.append(
+                    (os.path.basename(testPath), buf.getvalue())
+                    )              
+                
+            
+        runner = unittest.TextTestRunner(verbosity=2)
+        result = runner.run(suite)
+
+        for testName, error in loadFailures:
+            sys.stderr.write('\n' + '='*70 + '\n')
+            sys.stderr.write(
+                "Failed to load test module %s\n" % testName
+                )
+            sys.stderr.write(error)
+            sys.stderr.write('\n')
+            
+        if not result.wasSuccessful() or loadFailures:
+            raise TestError(
+                "Functional test suite failed!",
+                )
+            
+
+setup(name='antlr_python_runtime',
+      version='3.1',
+      packages=['antlr3'],
+
+      author="Benjamin Niemann",
+      author_email="pink@odahoda.de",
+      url="http://www.antlr.org/",
+      download_url="http://www.antlr.org/download.html",
+      license="BSD",
+      description="Runtime package for ANTLR3",
+      long_description=textwrap.dedent('''\
+      This is the runtime package for ANTLR3, which is required to use parsers
+      generated by ANTLR3.
+      '''),
+      
+      
+      cmdclass={'unittest': unittest,
+                'functest': functest,
+                'clean': clean
+                },
+      )
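+# Typical invocations of the custom commands above (illustrative):
+#   python setup.py unittest
+#   python setup.py functest --antlr-version=3.1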
--- a/thirdparty/google_appengine/tools/bulkload_client.py	Tue Jan 20 01:12:43 2009 +0000
+++ b/thirdparty/google_appengine/tools/bulkload_client.py	Tue Jan 20 13:19:45 2009 +0000
@@ -20,6 +20,10 @@
 import os
 import sys
 
+sys.stderr.write("This version of bulkload_client.py has been deprecated; "
+                 "please use the version at the root of your Google App "
+                 "Engine SDK install.\n")
+
 if not hasattr(sys, 'version_info'):
   sys.stderr.write('Very old versions of Python are not supported. Please '
                    'use version 2.5 or greater.\n')