Update GAE to 1.2.3 in thirdparty folder.
author Pawel Solyga <Pawel.Solyga@gmail.com>
date Fri, 19 Jun 2009 16:13:32 +0200
changeset 2413 d0b7dac5325c
parent 2412 c61d96e72e6f
child 2414 a95ba3595554
Update GAE to 1.2.3 in thirdparty folder.
thirdparty/google_appengine/RELEASE_NOTES
thirdparty/google_appengine/VERSION
thirdparty/google_appengine/google/appengine/api/apiproxy_rpc.py
thirdparty/google_appengine/google/appengine/api/apiproxy_stub_map.py
thirdparty/google_appengine/google/appengine/api/datastore.py
thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py
thirdparty/google_appengine/google/appengine/api/datastore_types.py
thirdparty/google_appengine/google/appengine/api/labs/__init__.py
thirdparty/google_appengine/google/appengine/api/labs/taskqueue/__init__.py
thirdparty/google_appengine/google/appengine/api/labs/taskqueue/taskqueue.py
thirdparty/google_appengine/google/appengine/api/labs/taskqueue/taskqueue_service_pb.py
thirdparty/google_appengine/google/appengine/api/labs/taskqueue/taskqueue_stub.py
thirdparty/google_appengine/google/appengine/api/memcache/__init__.py
thirdparty/google_appengine/google/appengine/api/queueinfo.py
thirdparty/google_appengine/google/appengine/api/urlfetch.py
thirdparty/google_appengine/google/appengine/api/urlfetch_stub.py
thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py
thirdparty/google_appengine/google/appengine/dist/__init__.py
thirdparty/google_appengine/google/appengine/dist/_library.py
thirdparty/google_appengine/google/appengine/dist/httplib.py
thirdparty/google_appengine/google/appengine/ext/admin/__init__.py
thirdparty/google_appengine/google/appengine/ext/admin/templates/base.html
thirdparty/google_appengine/google/appengine/ext/admin/templates/css/ae.css
thirdparty/google_appengine/google/appengine/ext/admin/templates/css/queues.css
thirdparty/google_appengine/google/appengine/ext/admin/templates/css/tasks.css
thirdparty/google_appengine/google/appengine/ext/admin/templates/js/webhook.js
thirdparty/google_appengine/google/appengine/ext/admin/templates/queues.html
thirdparty/google_appengine/google/appengine/ext/admin/templates/tasks.html
thirdparty/google_appengine/google/appengine/ext/db/__init__.py
thirdparty/google_appengine/google/appengine/ext/db/djangoforms.py
thirdparty/google_appengine/google/appengine/ext/gql/__init__.py
thirdparty/google_appengine/google/appengine/ext/preload/__init__.py
thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_stub.py
thirdparty/google_appengine/google/appengine/tools/appcfg.py
thirdparty/google_appengine/google/appengine/tools/bulkloader.py
thirdparty/google_appengine/google/appengine/tools/dev_appserver.py
thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py
thirdparty/google_appengine/google/net/proto/ProtocolBuffer.py
--- a/thirdparty/google_appengine/RELEASE_NOTES	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/RELEASE_NOTES	Fri Jun 19 16:13:32 2009 +0200
@@ -3,6 +3,30 @@
 
 App Engine Python SDK - Release Notes
 
+Version 1.2.3 - June 1, 2009
+============================
+
+  - Task Queue support available as google.appengine.api.labs.taskqueue.
+      http://code.google.com/appengine/docs/python/taskqueue
+  - Django 1.0 support. You must install Django locally on your machine
+    for the SDK but no longer need to upload it to App Engine.
+      from google.appengine.dist import use_library
+      use_library('django', '1.0')
+      http://code.google.com/p/googleappengine/issues/detail?id=872
+  - Urlfetch supports asynchronous requests.
+      http://code.google.com/p/googleappengine/issues/detail?id=958
+  - Urlfetch in SDK now matches App Engine more closely:
+    By default, it now sets the referer header, does not set the Accept
+    header, and sets Accept-Encoding to gzip.
+      http://code.google.com/p/googleappengine/issues/detail?id=970
+  - Fixed issue with httplib and absolute URLs.
+      http://code.google.com/p/googleappengine/issues/detail?id=1311
+  - Memcache key length is no longer restricted to 250 bytes: longer keys
+    will be replaced with a hash of the key.
+  - Datastore ancestor queries now work within transactions.
+  - Datastore transactions in SDK now snapshot on the first operation so they
+    do not see writes made during the transaction. Matches App Engine.
+
 Version 1.2.2 - April 22, 2009
 ==============================
 
@@ -36,7 +60,6 @@
       http://code.google.com/p/googleappengine/issues/detail?id=779
   - Fixed issue with the color argument of the Images API composite method.
 
-
 Version 1.2.1 - April 13, 2009
 =============================
 
--- a/thirdparty/google_appengine/VERSION	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/VERSION	Fri Jun 19 16:13:32 2009 +0200
@@ -1,3 +1,3 @@
-release: "1.2.2"
-timestamp: 1240438569
+release: "1.2.3"
+timestamp: 1243913623
 api_versions: ['1']
--- a/thirdparty/google_appengine/google/appengine/api/apiproxy_rpc.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/api/apiproxy_rpc.py	Fri Jun 19 16:13:32 2009 +0200
@@ -93,8 +93,6 @@
 
   def Wait(self):
     """Waits on the API call associated with this RPC."""
-    assert self.__state is not RPC.IDLE, ('RPC for %s.%s has not been started' %
-                                          (self.package, self.call))
     rpc_completed = self._WaitImpl()
 
     assert rpc_completed, ('RPC for %s.%s was not completed, and no other ' +
--- a/thirdparty/google_appengine/google/appengine/api/apiproxy_stub_map.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/api/apiproxy_stub_map.py	Fri Jun 19 16:13:32 2009 +0200
@@ -21,6 +21,7 @@
   APIProxyStubMap: container of APIProxy stubs.
   apiproxy: global instance of an APIProxyStubMap.
   MakeSyncCall: APIProxy entry point.
+  UserRPC: User-visible class wrapping asynchronous RPCs.
 """
 
 
@@ -30,6 +31,9 @@
 import inspect
 import sys
 
+from google.appengine.api import apiproxy_rpc
+
+
 def CreateRPC(service):
   """Creates a RPC instance for the given service.
 
@@ -160,7 +164,7 @@
         function(service, call, request, response)
 
 
-class APIProxyStubMap:
+class APIProxyStubMap(object):
   """Container of APIProxy stubs for more convenient unittesting.
 
   Stubs may be either trivial implementations of APIProxy services (e.g.
@@ -202,7 +206,7 @@
       service: string
       stub: stub
     """
-    assert not self.__stub_map.has_key(service)
+    assert not self.__stub_map.has_key(service), repr(service)
     self.__stub_map[service] = stub
 
     if service == 'datastore':
@@ -241,6 +245,203 @@
     self.__postcall_hooks.Call(service, call, request, response)
 
 
+class UserRPC(object):
+  """Wrapper class for asynchronous RPC.
+
+  Simplest low-level usage pattern:
+
+    rpc = UserRPC('service', [deadline], [callback])
+    rpc.make_call('method', request, response)
+    .
+    .
+    .
+    rpc.wait()
+    rpc.check_success()
+
+  However, a service module normally provides a wrapper so that the
+  typical usage pattern becomes more like this:
+
+    from google.appengine.api import service
+    rpc = service.create_rpc([deadline], [callback])
+    service.make_method_call(rpc, [service-specific-args])
+    .
+    .
+    .
+    rpc.wait()
+    result = rpc.get_result()
+
+  The service.make_method_call() function sets a service- and method-
+  specific hook function that is called by rpc.get_result() with the
+  rpc object as its first argument, and service-specific value as its
+  second argument.  The hook function should call rpc.check_success()
+  and then extract the user-level result from the rpc.result
+  protobuffer.  Additional arguments may be passed from
+  make_method_call() to the get_result hook via the second argument.
+  """
+
+  __method = None
+  __get_result_hook = None
+  __user_data = None
+  __postcall_hooks_called = False
+
+  def __init__(self, service, deadline=None, callback=None):
+    """Constructor.
+
+    Args:
+      service: The service name.
+      deadline: Optional deadline.  Default depends on the implementation.
+      callback: Optional argument-less callback function.
+    """
+    self.__service = service
+    self.__rpc = CreateRPC(service)
+    self.__rpc.deadline = deadline
+    self.__rpc.callback = callback
+
+  @property
+  def service(self):
+    """Return the service name."""
+    return self.__service
+
+  @property
+  def method(self):
+    """Return the method name."""
+    return self.__method
+
+  @property
+  def deadline(self):
+    """Return the deadline, if set explicitly (otherwise None)."""
+    return self.__rpc.deadline
+
+  def __get_callback(self):
+    """Return the callback attribute, a function without arguments.
+
+    This attribute can also be assigned to.  For example, the
+    following code calls some_other_function(rpc) when the RPC is
+    complete:
+
+      rpc = service.create_rpc()
+      rpc.callback = lambda: some_other_function(rpc)
+      service.make_method_call(rpc)
+      rpc.wait()
+    """
+    return self.__rpc.callback
+  def __set_callback(self, callback):
+    """Set the callback function."""
+    self.__rpc.callback = callback
+  callback = property(__get_callback, __set_callback)
+
+  @property
+  def request(self):
+    """Return the request protocol buffer object."""
+    return self.__rpc.request
+
+  @property
+  def response(self):
+    """Return the response protocol buffer object."""
+    return self.__rpc.response
+
+  @property
+  def state(self):
+    """Return the RPC state.
+
+    Possible values are attributes of apiproxy_rpc.RPC: IDLE, RUNNING,
+    FINISHING.
+    """
+    return self.__rpc.state
+
+  @property
+  def get_result_hook(self):
+    """Return the get-result hook function."""
+    return self.__get_result_hook
+
+  @property
+  def user_data(self):
+    """Return the user data for the hook function."""
+    return self.__user_data
+
+  def make_call(self, method, request, response,
+                get_result_hook=None, user_data=None):
+    """Initiate a call.
+
+    Args:
+      method: The method name.
+      request: The request protocol buffer.
+      response: The response protocol buffer.
+      get_result_hook: Optional get-result hook function.  If not None,
+        this must be a function with exactly one argument, the RPC
+        object (self).  Its return value is returned from get_result().
+      user_data: Optional additional arbitrary data for the get-result
+        hook function.  This can be accessed as rpc.user_data.  The
+        type of this value is up to the service module.
+
+    This function may only be called once per RPC object.  It sends
+    the request to the remote server, but does not wait for a
+    response.  This allows concurrent execution of the remote call and
+    further local processing (e.g., making additional remote calls).
+
+    Before the call is initiated, the precall hooks are called.
+    """
+    assert self.__rpc.state == apiproxy_rpc.RPC.IDLE, repr(self.state)
+    self.__method = method
+    self.__get_result_hook = get_result_hook
+    self.__user_data = user_data
+    apiproxy.GetPreCallHooks().Call(self.__service, method, request, response)
+    self.__rpc.MakeCall(self.__service, method, request, response)
+
+  def wait(self):
+    """Wait for the call to complete, and call callbacks.
+
+    This is the only time callback functions may be called.  (However,
+    note that check_success() and get_result() call wait().)   Waiting
+    for one RPC may cause callbacks for other RPCs to be called.
+    Callback functions may call check_success() and get_result().
+
+    Callbacks are called without arguments; if a callback needs access
+    to the RPC object a Python nested function (a.k.a. closure) or a
+    bound method may be used.  To facilitate this, the callback may be
+    assigned after the RPC object is created (but before make_call()
+    is called).
+
+    Note: don't confuse callbacks with get-result hooks or precall
+    and postcall hooks.
+    """
+    assert self.__rpc.state != apiproxy_rpc.RPC.IDLE, repr(self.state)
+    if self.__rpc.state == apiproxy_rpc.RPC.RUNNING:
+      self.__rpc.Wait()
+    assert self.__rpc.state == apiproxy_rpc.RPC.FINISHING, repr(self.state)
+
+  def check_success(self):
+    """Check for success of the RPC, possibly raising an exception.
+
+    This function should be called at least once per RPC.  If wait()
+    hasn't been called yet, it is called first.  If the RPC caused
+    an exceptional condition, an exception will be raised here.
+    The first time check_success() is called, the postcall hooks
+    are called.
+    """
+    self.wait()
+    self.__rpc.CheckSuccess()
+    if not self.__postcall_hooks_called:
+      self.__postcall_hooks_called = True
+      apiproxy.GetPostCallHooks().Call(self.__service, self.__method,
+                                       self.request, self.response)
+
+  def get_result(self):
+    """Get the result of the RPC, or possibly raise an exception.
+
+    This implies a call to check_success().  If a get-result hook was
+    passed to make_call(), that hook is responsible for calling
+    check_success(), and the return value of the hook is returned.
+    Otherwise, check_success() is called directly and None is
+    returned.
+    """
+    if self.__get_result_hook is None:
+      self.check_success()
+      return None
+    else:
+      return self.__get_result_hook(self)
+
+
 def GetDefaultAPIProxy():
   try:
     runtime = __import__('google.appengine.runtime', globals(), locals(),
@@ -249,4 +450,5 @@
   except (AttributeError, ImportError):
     return APIProxyStubMap()
 
+
 apiproxy = GetDefaultAPIProxy()
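
The UserRPC class added above wraps the asynchronous RPC machinery. A minimal
sketch of the low-level pattern from its docstring; the 'urlfetch' service,
'Fetch' method, and the protocol buffers used are illustrative assumptions,
not part of this changeset:

  from google.appengine.api import apiproxy_stub_map
  from google.appengine.api import urlfetch_service_pb

  request = urlfetch_service_pb.URLFetchRequest()
  request.set_url('http://example.com/')
  request.set_method(urlfetch_service_pb.URLFetchRequest.GET)
  response = urlfetch_service_pb.URLFetchResponse()

  rpc = apiproxy_stub_map.UserRPC('urlfetch', deadline=5)
  rpc.make_call('Fetch', request, response)  # runs precall hooks, starts call
  # ... concurrent local work may happen here ...
  rpc.wait()           # callbacks fire only during wait()
  rpc.check_success()  # raises on failure; runs postcall hooks once
  print response.content()
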
--- a/thirdparty/google_appengine/google/appengine/api/datastore.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/api/datastore.py	Fri Jun 19 16:13:32 2009 +0200
@@ -178,6 +178,7 @@
 
   if tx:
     tx.RecordModifiedKeys([e.key() for e in entities], error_on_repeat=False)
+    tx.entity_group = entities[0].entity_group()
 
   if multiple:
     return [Key._FromPb(k) for k in keys]
@@ -358,7 +359,7 @@
 
   def unindexed_properties(self):
     """Returns this entity's unindexed properties, as a frozenset of strings."""
-    return self.__unindexed_properties
+    return getattr(self, '_Entity__unindexed_properties', [])
 
   def __setitem__(self, name, value):
     """Implements the [] operator. Used to set property value(s).
@@ -510,7 +511,7 @@
         sample = values[0]
 
       if (isinstance(sample, datastore_types._RAW_PROPERTY_TYPES) or
-          name in self.__unindexed_properties):
+          name in self.unindexed_properties()):
         pb.raw_property_list().extend(properties)
       else:
         pb.property_list().extend(properties)
@@ -860,9 +861,7 @@
       # this query
       Query
     """
-    key = _GetCompleteKeyOrError(ancestor)
-    self.__ancestor = datastore_pb.Reference()
-    self.__ancestor.CopyFrom(key._Key__reference)
+    self.__ancestor = _GetCompleteKeyOrError(ancestor)
     return self
 
   def IsKeysOnly(self):
@@ -894,10 +893,6 @@
     This is not intended to be used by application developers. Use Get()
     instead!
     """
-    if _CurrentTransactionKey():
-      raise datastore_errors.BadRequestError(
-        "Can't query inside a transaction.")
-
     pb = self._ToPb(limit, offset)
     result = datastore_pb.QueryResult()
 
@@ -912,7 +907,7 @@
         raise datastore_errors.NeedIndexError(
           str(exc) + '\nThis query needs this index:\n' + yaml)
 
-    return Iterator._FromPb(result)
+    return Iterator(result)
 
   def Get(self, limit, offset=0):
     """Fetches and returns a maximum number of results from the query.
@@ -961,7 +956,7 @@
           'Argument to Get named \'offset\' must be an int greater than or '
           'equal to 0; received %s (a %s)' % (offset, typename(offset)))
 
-    return self._Run(limit, offset)._Next(limit)
+    return self._Run(limit, offset)._Get(limit)
 
   def Count(self, limit=None):
     """Returns the number of entities that this query matches. The returned
@@ -1138,8 +1133,17 @@
     Returns:
       # the PB representation of this Query
       datastore_pb.Query
+
+    Raises:
+      BadRequestError if called inside a transaction and the query does not
+      include an ancestor.
     """
+    if not self.__ancestor and _CurrentTransactionKey():
+      raise datastore_errors.BadRequestError(
+        'Only ancestor queries are allowed inside transactions.')
+
     pb = datastore_pb.Query()
+    _MaybeSetupTransaction(pb, [self.__ancestor])
 
     pb.set_kind(self.__kind.encode('utf-8'))
     pb.set_keys_only(bool(self.__keys_only))
@@ -1150,7 +1154,7 @@
     if offset is not None:
       pb.set_offset(offset)
     if self.__ancestor:
-      pb.mutable_ancestor().CopyFrom(self.__ancestor)
+      pb.mutable_ancestor().CopyFrom(self.__ancestor._Key__reference)
 
     if ((self.__hint == self.ORDER_FIRST and self.__orderings) or
         (self.__hint == self.ANCESTOR_FIRST and self.__ancestor) or
@@ -1513,11 +1517,43 @@
   > for person in it:
   >   print 'Hi, %s!' % person['name']
   """
-  def __init__(self, cursor, keys_only=False):
-    self.__cursor = cursor
-    self.__buffer = []
-    self.__more_results = True
-    self.__keys_only = keys_only
+  def __init__(self, query_result_pb):
+    self.__cursor = query_result_pb.cursor()
+    self.__keys_only = query_result_pb.keys_only()
+    self.__buffer = self._ProcessQueryResult(query_result_pb)
+
+  def _Get(self, count):
+    """Gets the next count result(s) of the query.
+
+    Not intended to be used by application developers. Use the python
+    iterator protocol instead.
+
+    This method uses _Next to return the next entities or keys from the list of
+    matching results. If the query specified a sort order, results are returned
+    in that order. Otherwise, the order is undefined.
+
+    The argument, count, specifies the number of results to return. However, the
+    length of the returned list may be smaller than count. This is the case only
+    if count is greater than the number of remaining results.
+
+    The results are always returned as a list. If there are no results left,
+    an empty list is returned.
+
+    Args:
+      # the number of results to return; must be >= 1
+      count: int or long
+
+    Returns:
+      # a list of entities or keys
+      [Entity or Key, ...]
+    """
+    entityList = self._Next(count)
+    while len(entityList) < count and self.__more_results:
+      next_results = self._Next(count - len(entityList))
+      if not next_results:
+        break
+      entityList += next_results
+    return entityList
 
   def _Next(self, count):
     """Returns the next result(s) of the query.
@@ -1529,10 +1565,11 @@
     results. If the query specified a sort order, results are returned in that
     order. Otherwise, the order is undefined.
 
-    The argument specifies the number of results to return. If it's greater
-    than the number of remaining results, all of the remaining results are
-    returned. In that case, the length of the returned list will be smaller
-    than count.
+    The argument, count, specifies the number of results to return. However, the
+    length of the returned list may be smaller than count. This is the case if
+    count is greater than the number of remaining results or the size of the
+    remaining results exciteds the RPC buffer limit. Use _Get to insure all
+    possible entities are retrieved.
 
     There is an internal buffer for use with the next() method. If this buffer
     is not empty, up to 'count' values are removed from this buffer and
@@ -1555,21 +1592,49 @@
         (count, typename(count)))
 
     if self.__buffer:
-      raise datastore_errors.BadRequestError(
-          'You can\'t mix next() and _Next()')
+      if count <= len(self.__buffer):
+        entity_list = self.__buffer[:count]
+        del self.__buffer[:count]
+        return entity_list
+      else:
+        entity_list = self.__buffer
+        self.__buffer = []
+        count -= len(entity_list)
+    else:
+      entity_list = []
 
     if not self.__more_results:
-      return []
+      return entity_list
 
     req = datastore_pb.NextRequest()
     req.set_count(count)
-    req.mutable_cursor().CopyFrom(self._ToPb())
+    req.mutable_cursor().CopyFrom(self.__cursor)
     result = datastore_pb.QueryResult()
     try:
       apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Next', req, result)
     except apiproxy_errors.ApplicationError, err:
       raise _ToDatastoreError(err)
 
+    return entity_list + self._ProcessQueryResult(result)
+
+  def _ProcessQueryResult(self, result):
+    """Returns all results from datastore_pb.QueryResult and updates
+    self.__more_results.
+
+    Not intended to be used by application developers. Use the python
+    iterator protocol instead.
+
+    The results are always returned as a list. If there are no results left,
+    an empty list is returned.
+
+    Args:
+      # the instance of datastore_pb.QueryResult to be stored
+      result: datastore_pb.QueryResult
+
+    Returns:
+      # a list of entities or keys
+      [Entity or Key, ...]
+    """
     self.__more_results = result.more_results()
 
     if self.__keys_only:
@@ -1589,34 +1654,6 @@
 
   def __iter__(self): return self
 
-  def _ToPb(self):
-    """Converts this Iterator to its protocol buffer representation. Not
-    intended to be used by application developers. Enforced by hiding the
-    datastore_pb classes.
-
-    Returns:
-      # the PB representation of this Iterator
-      datastore_pb.Cursor
-    """
-    pb = datastore_pb.Cursor()
-    pb.set_cursor(self.__cursor)
-    return pb
-
-  @staticmethod
-  def _FromPb(pb):
-    """Static factory method. Returns the Iterator representation of the given
-    protocol buffer (datastore_pb.QueryResult). Not intended to be used by
-    application developers. Enforced by hiding the datastore_pb classes.
-
-    Args:
-      pb: datastore_pb.QueryResult
-
-    Returns:
-      Iterator
-    """
-    return Iterator(pb.cursor().cursor(), keys_only=pb.keys_only())
-
-
 class _Transaction(object):
   """Encapsulates a transaction currently in progress.
 
@@ -1640,8 +1677,6 @@
   def RecordModifiedKeys(self, keys, error_on_repeat=True):
     """Updates the modified keys seen so far.
 
-    Also sets entity_group if it hasn't yet been set.
-
     If error_on_repeat is True and any of the given keys have already been
     modified, raises BadRequestError.
 
@@ -1649,10 +1684,6 @@
       keys: sequence of Keys
     """
     keys, _ = NormalizeAndTypeCheckKeys(keys)
-
-    if keys and not self.entity_group:
-      self.entity_group = keys[0].entity_group()
-
     keys = set(keys)
 
     if error_on_repeat:
@@ -1834,14 +1865,14 @@
   current transaction.
 
   Args:
-    request: GetRequest, PutRequest, or DeleteRequest
+    request: GetRequest, PutRequest, DeleteRequest, or Query
     keys: sequence of Keys
 
   Returns:
     _Transaction if we're inside a transaction, otherwise None
   """
   assert isinstance(request, (datastore_pb.GetRequest, datastore_pb.PutRequest,
-                              datastore_pb.DeleteRequest))
+                              datastore_pb.DeleteRequest, datastore_pb.Query))
   tx_key = None
 
   try:
@@ -1854,6 +1885,7 @@
         expected_group = tx.entity_group
       else:
         expected_group = groups[0]
+
       for group in groups:
         if (group != expected_group or
 
@@ -1866,6 +1898,9 @@
             (not group.has_id_or_name() and group is not expected_group)):
           raise _DifferentEntityGroupError(expected_group, group)
 
+        if not tx.entity_group and group.has_id_or_name():
+          tx.entity_group = group
+
       if not tx.handle:
         tx.handle = datastore_pb.Transaction()
         req = api_base_pb.VoidProto()
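
The datastore.py changes above implement the release note "Datastore ancestor
queries now work within transactions". A minimal sketch, assuming parent_key
is a complete Key naming the transaction's entity group:

  from google.appengine.api import datastore

  def fetch_children(parent_key):
    # Legal in 1.2.3 because the query has an ancestor; a query without
    # one would still raise BadRequestError inside the transaction.
    query = datastore.Query('Child')
    query.Ancestor(parent_key)
    return query.Get(100)

  children = datastore.RunInTransaction(fetch_children, parent_key)
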
--- a/thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py	Fri Jun 19 16:13:32 2009 +0200
@@ -508,13 +508,18 @@
 
 
   def _Dynamic_Get(self, get_request, get_response):
+    if get_request.has_transaction():
+      entities = self.__tx_snapshot
+    else:
+      entities = self.__entities
+
     for key in get_request.key_list():
       self.__ValidateAppId(key.app())
       app_kind = self._AppKindForKey(key)
 
       group = get_response.add_entity()
       try:
-        entity = self.__entities[app_kind][key].protobuf
+        entity = entities[app_kind][key].protobuf
       except KeyError:
         entity = None
 
@@ -545,9 +550,13 @@
 
   def _Dynamic_RunQuery(self, query, query_result):
     if not self.__tx_lock.acquire(False):
-      raise apiproxy_errors.ApplicationError(
-          datastore_pb.Error.BAD_REQUEST, 'Can\'t query inside a transaction.')
+      if not query.has_ancestor():
+        raise apiproxy_errors.ApplicationError(
+          datastore_pb.Error.BAD_REQUEST,
+          'Only ancestor queries are allowed inside transactions.')
+      entities = self.__tx_snapshot
     else:
+      entities = self.__entities
       self.__tx_lock.release()
 
     app = query.app()
@@ -598,7 +607,7 @@
 
     try:
       query.set_app(app)
-      results = self.__entities[app, query.kind()].values()
+      results = entities[app, query.kind()].values()
       results = [entity.native for entity in results]
     except KeyError:
       results = []
--- a/thirdparty/google_appengine/google/appengine/api/datastore_types.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/api/datastore_types.py	Fri Jun 19 16:13:32 2009 +0200
@@ -430,7 +430,7 @@
     """
     args = []
     for elem in self.__reference.path().element_list():
-      args.append(repr(elem.type()))
+      args.append(repr(elem.type().decode('utf-8')))
       if elem.has_name():
         args.append(repr(elem.name().decode('utf-8')))
       else:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/labs/__init__.py	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/labs/taskqueue/__init__.py	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Task Queue API module."""
+
+from taskqueue import *
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/labs/taskqueue/taskqueue.py	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,637 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Task Queue API.
+
+Enables an application to queue background work for itself. Work is done through
+webhooks that process tasks pushed from a queue. Tasks will execute in
+best-effort order of ETA. Webhooks that fail will cause tasks to be retried at a
+later time. Multiple queues may exist with independent throttling controls.
+
+Webhook URLs may be specified directly for Tasks, or the default URL scheme
+may be used, which will translate Task names into URLs relative to a Queue's
+base path. A default queue is also provided for simple usage.
+"""
+
+
+
+import datetime
+import re
+import time
+import urllib
+import urlparse
+
+import taskqueue_service_pb
+
+from google.appengine.api import apiproxy_stub_map
+from google.appengine.api import urlfetch
+from google.appengine.runtime import apiproxy_errors
+
+
+class Error(Exception):
+  """Base-class for exceptions in this module."""
+
+
+class UnknownQueueError(Error):
+  """The queue specified is unknown."""
+
+
+class TransientError(Error):
+  """There was a transient error while accessing the queue.
+
+  Please try again later.
+  """
+
+
+class InternalError(Error):
+  """There was an internal error while accessing this queue.
+
+  If this problem continues, please contact the App Engine team through
+  our support forum with a description of your problem.
+  """
+
+
+class InvalidTaskError(Error):
+  """The task's parameters, headers, or method is invalid."""
+
+
+class InvalidTaskNameError(InvalidTaskError):
+  """The task's name is invalid."""
+
+
+class TaskTooLargeError(InvalidTaskError):
+  """The task is too large with its headers and payload."""
+
+
+class TaskAlreadyExistsError(InvalidTaskError):
+  """Task already exists. It has not yet run."""
+
+
+class TombstonedTaskError(InvalidTaskError):
+  """Task has been tombstoned."""
+
+
+class InvalidUrlError(InvalidTaskError):
+  """The task's relative URL is invalid."""
+
+
+class BadTaskStateError(Error):
+  """The task is in the wrong state for the requested operation."""
+
+
+class InvalidQueueError(Error):
+  """The Queue's configuration is invalid."""
+
+
+class InvalidQueueNameError(InvalidQueueError):
+  """The Queue's name is invalid."""
+
+
+class _RelativeUrlError(Error):
+  """The relative URL supplied is invalid."""
+
+
+class PermissionDeniedError(Error):
+  """The requested operation is not allowed for this app."""
+
+
+MAX_QUEUE_NAME_LENGTH = 100
+
+MAX_TASK_NAME_LENGTH = 500
+
+MAX_TASK_SIZE_BYTES = 10 * (2 ** 10)
+
+MAX_URL_LENGTH = 2083
+
+_DEFAULT_QUEUE = 'default'
+
+_DEFAULT_QUEUE_PATH = '/_ah/queue'
+
+_METHOD_MAP = {
+    'GET': taskqueue_service_pb.TaskQueueAddRequest.GET,
+    'POST': taskqueue_service_pb.TaskQueueAddRequest.POST,
+    'HEAD': taskqueue_service_pb.TaskQueueAddRequest.HEAD,
+    'PUT': taskqueue_service_pb.TaskQueueAddRequest.PUT,
+    'DELETE': taskqueue_service_pb.TaskQueueAddRequest.DELETE,
+}
+
+_NON_POST_METHODS = frozenset(['GET', 'HEAD', 'PUT', 'DELETE'])
+
+_BODY_METHODS = frozenset(['POST', 'PUT'])
+
+_TASK_NAME_PATTERN = r'^[a-zA-Z0-9-]{1,%s}$' % MAX_TASK_NAME_LENGTH
+
+_TASK_NAME_RE = re.compile(_TASK_NAME_PATTERN)
+
+_QUEUE_NAME_PATTERN = r'^[a-zA-Z0-9-]{1,%s}$' % MAX_QUEUE_NAME_LENGTH
+
+_QUEUE_NAME_RE = re.compile(_QUEUE_NAME_PATTERN)
+
+
+class _UTCTimeZone(datetime.tzinfo):
+  """UTC timezone."""
+
+  ZERO = datetime.timedelta(0)
+
+  def utcoffset(self, dt):
+    return self.ZERO
+
+  def dst(self, dt):
+    return self.ZERO
+
+  def tzname(self, dt):
+    return 'UTC'
+
+
+_UTC = _UTCTimeZone()
+
+
+def _parse_relative_url(relative_url):
+  """Parses a relative URL and splits it into its path and query string.
+
+  Args:
+    relative_url: The relative URL, starting with a '/'.
+
+  Returns:
+    Tuple (path, query) where:
+      path: The path in the relative URL.
+      query: The query string in the URL without the '?' character.
+
+  Raises:
+    _RelativeUrlError if the relative_url is invalid for whatever reason
+  """
+  if not relative_url:
+    raise _RelativeUrlError('Relative URL is empty')
+  (scheme, netloc, path, query, fragment) = urlparse.urlsplit(relative_url)
+  if scheme or netloc:
+    raise _RelativeUrlError('Relative URL may not have a scheme or location')
+  if fragment:
+    raise _RelativeUrlError('Relative URL may not specify a fragment')
+  if not path or path[0] != '/':
+    raise _RelativeUrlError('Relative URL path must start with "/"')
+  return path, query
+
+
+def _flatten_params(params):
+  """Converts a dictionary of parameters to a list of parameters.
+
+  Any unicode strings in keys or values will be encoded as UTF-8.
+
+  Args:
+    params: Dictionary mapping parameter keys to values. Values will be
+      converted to a string and added to the list as a tuple (key, value). If
+      a value is iterable and not a string, each contained value will be
+      added as a separate (key, value) tuple.
+
+  Returns:
+    List of (key, value) tuples.
+  """
+  def get_string(value):
+    if isinstance(value, unicode):
+      return unicode(value).encode('utf-8')
+    else:
+      return str(value)
+
+  param_list = []
+  for key, value in params.iteritems():
+    key = get_string(key)
+    if isinstance(value, basestring):
+      param_list.append((key, get_string(value)))
+    else:
+      try:
+        iterator = iter(value)
+      except TypeError:
+        param_list.append((key, str(value)))
+      else:
+        param_list.extend((key, get_string(v)) for v in iterator)
+
+  return param_list
+
+
+class Task(object):
+  """Represents a single Task on a queue."""
+
+  __CONSTRUCTOR_KWARGS = frozenset([
+      'countdown', 'eta', 'headers', 'method', 'name', 'params', 'url'])
+
+  def __init__(self, payload=None, **kwargs):
+    """Initializer.
+
+    All parameters are optional.
+
+    Args:
+      payload: The payload data for this Task that will be delivered to the
+        webhook as the HTTP request body. This is only allowed for POST and PUT
+        methods.
+      countdown: Time in seconds into the future that this Task should execute.
+        Defaults to zero.
+      eta: Absolute time when the Task should execute. May not be specified
+        if 'countdown' is also supplied.
+      headers: Dictionary of headers to pass to the webhook. Values in the
+        dictionary may be iterable to indicate repeated header fields.
+      method: Method to use when accessing the webhook. Defaults to 'POST'.
+      name: Name to give the Task; if not specified, a name will be
+        auto-generated when added to a queue and assigned to this object. Must
+        match the _TASK_NAME_PATTERN regular expression.
+      params: Dictionary of parameters to use for this Task. For POST requests
+        these params will be encoded as 'application/x-www-form-urlencoded' and
+        set to the payload. For all other methods, the parameters will be
+        converted to a query string. May not be specified if the URL already
+        contains a query string.
+      url: Relative URL where the webhook that should handle this task is
+        located for this application. May have a query string unless this is
+        a POST method.
+
+    Raises:
+      InvalidTaskError if any of the parameters are invalid;
+      InvalidTaskNameError if the task name is invalid; InvalidUrlError if
+      the task URL is invalid or too long; TaskTooLargeError if the task with
+      its payload is too large.
+    """
+    args_diff = set(kwargs.iterkeys()) - self.__CONSTRUCTOR_KWARGS
+    if args_diff:
+      raise TypeError('Invalid arguments: %s' % ', '.join(args_diff))
+
+    self.__name = kwargs.get('name')
+    if self.__name and not _TASK_NAME_RE.match(self.__name):
+      raise InvalidTaskNameError(
+          'Task name does not match expression "%s"; found %s' %
+          (_TASK_NAME_PATTERN, self.__name))
+
+    self.__default_url, self.__relative_url, query = Task.__determine_url(
+        kwargs.get('url', ''))
+    self.__headers = urlfetch._CaselessDict()
+    self.__headers.update(kwargs.get('headers', {}))
+    self.__method = kwargs.get('method', 'POST').upper()
+    self.__payload = None
+    params = kwargs.get('params', {})
+
+    if query and params:
+      raise InvalidTaskError('Query string and parameters both present; '
+                             'only one of these may be supplied')
+
+    if self.__method == 'POST':
+      if payload and params:
+        raise InvalidTaskError('Message body and parameters both present for '
+                               'POST method; only one of these may be supplied')
+      elif query:
+        raise InvalidTaskError('POST method may not have a query string; '
+                               'use the "params" keyword argument instead')
+      elif params:
+        self.__payload = Task.__encode_params(params)
+        self.__headers.setdefault(
+            'content-type', 'application/x-www-form-urlencoded')
+      elif payload is not None:
+        self.__payload = Task.__convert_payload(payload, self.__headers)
+    elif self.__method in _NON_POST_METHODS:
+      if payload and self.__method not in _BODY_METHODS:
+        raise InvalidTaskError('Payload may only be specified for methods %s' %
+                               ', '.join(_BODY_METHODS))
+      if payload:
+        self.__payload = Task.__convert_payload(payload, self.__headers)
+      if params:
+        query = Task.__encode_params(params)
+      if query:
+        self.__relative_url = '%s?%s' % (self.__relative_url, query)
+    else:
+      raise InvalidTaskError('Invalid method: %s' % self.__method)
+
+    self.__headers_list = _flatten_params(self.__headers)
+    self.__eta = Task.__determine_eta(
+        kwargs.get('eta'), kwargs.get('countdown'))
+    self.__enqueued = False
+
+    if self.size > MAX_TASK_SIZE_BYTES:
+      raise TaskTooLargeError('Task size must be less than %d; found %d' %
+                              (MAX_TASK_SIZE_BYTES, self.size))
+
+  @staticmethod
+  def __determine_url(relative_url):
+    """Determines the URL of a task given a relative URL and a name.
+
+    Args:
+      relative_url: The relative URL for the Task.
+
+    Returns:
+      Tuple (default_url, relative_url, query) where:
+        default_url: True if this Task is using the default URL scheme;
+          False otherwise.
+        relative_url: String containing the relative URL for this Task.
+        query: The query string for this task.
+
+    Raises:
+      InvalidUrlError if the relative_url is invalid.
+    """
+    if not relative_url:
+      default_url, query = True, ''
+    else:
+      default_url = False
+      try:
+        relative_url, query = _parse_relative_url(relative_url)
+      except _RelativeUrlError, e:
+        raise InvalidUrlError(e)
+
+    if len(relative_url) > MAX_URL_LENGTH:
+      raise InvalidUrlError(
+          'Task URL must be less than %d characters; found %d' %
+          (MAX_URL_LENGTH, len(relative_url)))
+
+    return (default_url, relative_url, query)
+
+  @staticmethod
+  def __determine_eta(eta=None, countdown=None, now=datetime.datetime.now):
+    """Determines the ETA for a task.
+
+    If 'eta' and 'countdown' are both None, the current time will be used.
+    Otherwise, only one of them may be specified.
+
+    Args:
+      eta: A datetime.datetime specifying the absolute ETA or None
+      countdown: Number of seconds into the future from the present time at
+        which the ETA should be set.
+
+    Returns:
+      A datetime in the UTC timezone containing the ETA.
+
+    Raises:
+      InvalidTaskError if the parameters are invalid.
+    """
+    if eta is not None and countdown is not None:
+      raise InvalidTaskError('May not use a countdown and ETA together')
+    elif eta is not None:
+      if not isinstance(eta, datetime.datetime):
+        raise InvalidTaskError('ETA must be a datetime.datetime instance')
+    elif countdown is not None:
+      try:
+        countdown = float(countdown)
+      except ValueError:
+        raise InvalidTaskError('Countdown must be a number')
+      else:
+        eta = now() + datetime.timedelta(seconds=countdown)
+    else:
+      eta = now()
+
+    if eta.tzinfo is None:
+      eta = eta.replace(tzinfo=_UTC)
+    return eta.astimezone(_UTC)
+
+  @staticmethod
+  def __encode_params(params):
+    """URL-encodes a list of parameters.
+
+    Args:
+      params: Dictionary of parameters, possibly with iterable values.
+
+    Returns:
+      URL-encoded version of the params, ready to be added to a query string or
+      POST body.
+    """
+    return urllib.urlencode(_flatten_params(params))
+
+  @staticmethod
+  def __convert_payload(payload, headers):
+    """Converts a Task payload into UTF-8 and sets headers if necessary.
+
+    Args:
+      payload: The payload data to convert.
+      headers: Dictionary of headers.
+
+    Returns:
+      The payload as a non-unicode string.
+
+    Raises:
+      InvalidTaskError if the payload is not a string or unicode instance.
+    """
+    if isinstance(payload, unicode):
+      headers.setdefault('content-type', 'text/plain; charset=utf-8')
+      payload = payload.encode('utf-8')
+    elif not isinstance(payload, str):
+      raise InvalidTaskError(
+          'Task payloads must be strings; invalid payload: %r' % payload)
+    return payload
+
+  @property
+  def on_queue_url(self):
+    """Returns True if this Task will run on the queue's URL."""
+    return self.__default_url
+
+  @property
+  def eta(self):
+    """Returns an datetime corresponding to when this Task will execute."""
+    return self.__eta
+
+  @property
+  def headers(self):
+    """Returns a copy of the headers for this Task."""
+    return self.__headers.copy()
+
+  @property
+  def method(self):
+    """Returns the method to use for this Task."""
+    return self.__method
+
+  @property
+  def name(self):
+    """Returns the name of this Task.
+
+    Will be None if using auto-assigned Task names and this Task has not yet
+    been added to a Queue.
+    """
+    return self.__name
+
+  @property
+  def payload(self):
+    """Returns the payload for this task, which may be None."""
+    return self.__payload
+
+  @property
+  def size(self):
+    """Returns the size of this task in bytes."""
+    HEADER_SEPARATOR = len(': \r\n')
+    header_size = sum((len(key) + len(value) + HEADER_SEPARATOR)
+                      for key, value in self.__headers_list)
+    return (len(self.__method) + len(self.__payload or '') +
+            len(self.__relative_url) + header_size)
+
+  @property
+  def url(self):
+    """Returns the relative URL for this Task."""
+    return self.__relative_url
+
+  @property
+  def was_enqueued(self):
+    """Returns True if this Task has been enqueued.
+
+    Note: This will not check if this task already exists in the queue.
+    """
+    return self.__enqueued
+
+  def add(self, queue_name=_DEFAULT_QUEUE):
+    """Adds this Task to a queue.
+
+    Args:
+      queue_name: Name of the queue to add this Task to. (optional)
+
+    Returns:
+      This Task itself.
+
+    Raises:
+      BadTaskStateError if this task has already been enqueued.
+    """
+    return Queue(queue_name).add(self)
+
+
+class Queue(object):
+  """Represents a Queue."""
+
+  def __init__(self, name=_DEFAULT_QUEUE):
+    """Initializer.
+
+    Args:
+      name: Name of this queue. If not supplied, defaults to the default queue.
+
+    Raises:
+      InvalidQueueNameError if the queue name is invalid.
+    """
+    if not _QUEUE_NAME_RE.match(name):
+      raise InvalidQueueNameError(
+          'Queue name does not match pattern "%s"; found %s' %
+          (_QUEUE_NAME_PATTERN, name))
+    self.__name = name
+    self.__url = '%s/%s' % (_DEFAULT_QUEUE_PATH, self.__name)
+
+  def add(self, task):
+    """Adds a Task to this Queue.
+
+    Args:
+      task: The Task to add.
+
+    Returns:
+      The Task that was supplied to this method.
+
+    Raises:
+      BadTaskStateError if the Task has already been added to a queue.
+      Error-subclass on application errors.
+    """
+    if task.was_enqueued:
+      raise BadTaskStateError('Task has already been enqueued')
+
+    request = taskqueue_service_pb.TaskQueueAddRequest()
+    response = taskqueue_service_pb.TaskQueueAddResponse()
+
+    adjusted_url = task.url
+    if task.on_queue_url:
+      adjusted_url = self.__url + task.url
+
+
+    request.set_queue_name(self.__name)
+    request.set_eta_usec(int(time.mktime(task.eta.utctimetuple())) * 10**6)
+    request.set_method(_METHOD_MAP.get(task.method))
+    request.set_url(adjusted_url)
+
+    if task.name:
+      request.set_task_name(task.name)
+    else:
+      request.set_task_name('')
+
+    if task.payload:
+      request.set_body(task.payload)
+    for key, value in _flatten_params(task.headers):
+      header = request.add_header()
+      header.set_key(key)
+      header.set_value(value)
+
+    call_tuple = ('taskqueue', 'Add', request, response)
+    apiproxy_stub_map.apiproxy.GetPreCallHooks().Call(*call_tuple)
+    try:
+      apiproxy_stub_map.MakeSyncCall(*call_tuple)
+    except apiproxy_errors.ApplicationError, e:
+      self.__TranslateError(e)
+    else:
+      apiproxy_stub_map.apiproxy.GetPostCallHooks().Call(*call_tuple)
+
+    if response.has_chosen_task_name():
+      task._Task__name = response.chosen_task_name()
+    task._Task__enqueued = True
+    return task
+
+  @property
+  def name(self):
+    """Returns the name of this queue."""
+    return self.__name
+
+  @staticmethod
+  def __TranslateError(error):
+    """Translates a TaskQueueServiceError into an exception.
+
+    Args:
+      error: Value from TaskQueueServiceError enum.
+
+    Raises:
+      The corresponding Exception sub-class for that error code.
+    """
+    if (error.application_error ==
+        taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE):
+      raise UnknownQueueError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR):
+      raise TransientError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR):
+      raise InternalError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.TASK_TOO_LARGE):
+      raise TaskTooLargeError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.INVALID_TASK_NAME):
+      raise InvalidTaskNameError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME):
+      raise InvalidQueueNameError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.INVALID_URL):
+      raise InvalidUrlError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_RATE):
+      raise InvalidQueueError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED):
+      raise PermissionDeniedError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS):
+      raise TaskAlreadyExistsError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK):
+      raise TombstonedTaskError(error.error_detail)
+    elif (error.application_error ==
+          taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA):
+      raise InvalidTaskError(error.error_detail)
+    else:
+      raise Error('Application error %s: %s' %
+                  (error.application_error, error.error_detail))
+
+
+def add(*args, **kwargs):
+  """Convenience method will create a Task and add it to the default queue.
+
+  Args:
+    *args, **kwargs: Passed to the Task constructor.
+
+  Returns:
+    The Task that was added to the queue.
+  """
+  return Task(*args, **kwargs).add()
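
The Task and Queue API defined above is used like this; a sketch assuming a
'/work' webhook and a queue named 'emails' configured in queue.yaml:

  from google.appengine.api.labs import taskqueue

  # One-liner against the default queue; POST params become the form body.
  taskqueue.add(url='/work', params={'key': 'value'})

  # Explicit Task on a named queue, delayed 60 seconds via countdown.
  task = taskqueue.Task(url='/send_mail',
                        params={'to': 'user@example.com'},
                        countdown=60)
  taskqueue.Queue('emails').add(task)
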
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/labs/taskqueue/taskqueue_service_pb.py	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,1590 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.net.proto import ProtocolBuffer
+import array
+import dummy_thread as thread
+
+__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
+                   unusednames=printElemNumber,debug_strs no-special"""
+
+class TaskQueueServiceError(ProtocolBuffer.ProtocolMessage):
+
+  OK           =    0
+  UNKNOWN_QUEUE =    1
+  TRANSIENT_ERROR =    2
+  INTERNAL_ERROR =    3
+  TASK_TOO_LARGE =    4
+  INVALID_TASK_NAME =    5
+  INVALID_QUEUE_NAME =    6
+  INVALID_URL  =    7
+  INVALID_QUEUE_RATE =    8
+  PERMISSION_DENIED =    9
+  TASK_ALREADY_EXISTS =   10
+  TOMBSTONED_TASK =   11
+  INVALID_ETA  =   12
+
+  _ErrorCode_NAMES = {
+    0: "OK",
+    1: "UNKNOWN_QUEUE",
+    2: "TRANSIENT_ERROR",
+    3: "INTERNAL_ERROR",
+    4: "TASK_TOO_LARGE",
+    5: "INVALID_TASK_NAME",
+    6: "INVALID_QUEUE_NAME",
+    7: "INVALID_URL",
+    8: "INVALID_QUEUE_RATE",
+    9: "PERMISSION_DENIED",
+    10: "TASK_ALREADY_EXISTS",
+    11: "TOMBSTONED_TASK",
+    12: "INVALID_ETA",
+  }
+
+  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
+  ErrorCode_Name = classmethod(ErrorCode_Name)
+
+
+  def __init__(self, contents=None):
+    pass
+    if contents is not None: self.MergeFromString(contents)
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+
+  def Equals(self, x):
+    if x is self: return 1
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    return n + 0
+
+  def Clear(self):
+    pass
+
+  def OutputUnchecked(self, out):
+    pass
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    return res
+
+
+  _TEXT = (
+   "ErrorCode",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class TaskQueueAddRequest_Header(ProtocolBuffer.ProtocolMessage):
+  has_key_ = 0
+  key_ = ""
+  has_value_ = 0
+  value_ = ""
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def key(self): return self.key_
+
+  def set_key(self, x):
+    self.has_key_ = 1
+    self.key_ = x
+
+  def clear_key(self):
+    if self.has_key_:
+      self.has_key_ = 0
+      self.key_ = ""
+
+  def has_key(self): return self.has_key_
+
+  def value(self): return self.value_
+
+  def set_value(self, x):
+    self.has_value_ = 1
+    self.value_ = x
+
+  def clear_value(self):
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = ""
+
+  def has_value(self): return self.has_value_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_key()): self.set_key(x.key())
+    if (x.has_value()): self.set_value(x.value())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_key_ != x.has_key_: return 0
+    if self.has_key_ and self.key_ != x.key_: return 0
+    if self.has_value_ != x.has_value_: return 0
+    if self.has_value_ and self.value_ != x.value_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_key_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: key not set.')
+    if (not self.has_value_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: value not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.key_))
+    n += self.lengthString(len(self.value_))
+    return n + 2
+
+  def Clear(self):
+    self.clear_key()
+    self.clear_value()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(58)
+    out.putPrefixedString(self.key_)
+    out.putVarInt32(66)
+    out.putPrefixedString(self.value_)
+
+  def TryMerge(self, d):
+    while 1:
+      tt = d.getVarInt32()
+      if tt == 52: break
+      if tt == 58:
+        self.set_key(d.getPrefixedString())
+        continue
+      if tt == 66:
+        self.set_value(d.getPrefixedString())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_))
+    if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
+    return res
+
+class TaskQueueAddRequest(ProtocolBuffer.ProtocolMessage):
+
+  GET          =    1
+  POST         =    2
+  HEAD         =    3
+  PUT          =    4
+  DELETE       =    5
+
+  _RequestMethod_NAMES = {
+    1: "GET",
+    2: "POST",
+    3: "HEAD",
+    4: "PUT",
+    5: "DELETE",
+  }
+
+  def RequestMethod_Name(cls, x): return cls._RequestMethod_NAMES.get(x, "")
+  RequestMethod_Name = classmethod(RequestMethod_Name)
+
+  has_queue_name_ = 0
+  queue_name_ = ""
+  has_task_name_ = 0
+  task_name_ = ""
+  has_eta_usec_ = 0
+  eta_usec_ = 0
+  has_method_ = 0
+  method_ = 2
+  has_url_ = 0
+  url_ = ""
+  has_body_ = 0
+  body_ = ""
+
+  def __init__(self, contents=None):
+    self.header_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def queue_name(self): return self.queue_name_
+
+  def set_queue_name(self, x):
+    self.has_queue_name_ = 1
+    self.queue_name_ = x
+
+  def clear_queue_name(self):
+    if self.has_queue_name_:
+      self.has_queue_name_ = 0
+      self.queue_name_ = ""
+
+  def has_queue_name(self): return self.has_queue_name_
+
+  def task_name(self): return self.task_name_
+
+  def set_task_name(self, x):
+    self.has_task_name_ = 1
+    self.task_name_ = x
+
+  def clear_task_name(self):
+    if self.has_task_name_:
+      self.has_task_name_ = 0
+      self.task_name_ = ""
+
+  def has_task_name(self): return self.has_task_name_
+
+  def eta_usec(self): return self.eta_usec_
+
+  def set_eta_usec(self, x):
+    self.has_eta_usec_ = 1
+    self.eta_usec_ = x
+
+  def clear_eta_usec(self):
+    if self.has_eta_usec_:
+      self.has_eta_usec_ = 0
+      self.eta_usec_ = 0
+
+  def has_eta_usec(self): return self.has_eta_usec_
+
+  def method(self): return self.method_
+
+  def set_method(self, x):
+    self.has_method_ = 1
+    self.method_ = x
+
+  def clear_method(self):
+    if self.has_method_:
+      self.has_method_ = 0
+      self.method_ = 2
+
+  def has_method(self): return self.has_method_
+
+  def url(self): return self.url_
+
+  def set_url(self, x):
+    self.has_url_ = 1
+    self.url_ = x
+
+  def clear_url(self):
+    if self.has_url_:
+      self.has_url_ = 0
+      self.url_ = ""
+
+  def has_url(self): return self.has_url_
+
+  def header_size(self): return len(self.header_)
+  def header_list(self): return self.header_
+
+  def header(self, i):
+    return self.header_[i]
+
+  def mutable_header(self, i):
+    return self.header_[i]
+
+  def add_header(self):
+    x = TaskQueueAddRequest_Header()
+    self.header_.append(x)
+    return x
+
+  def clear_header(self):
+    self.header_ = []
+  def body(self): return self.body_
+
+  def set_body(self, x):
+    self.has_body_ = 1
+    self.body_ = x
+
+  def clear_body(self):
+    if self.has_body_:
+      self.has_body_ = 0
+      self.body_ = ""
+
+  def has_body(self): return self.has_body_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_queue_name()): self.set_queue_name(x.queue_name())
+    if (x.has_task_name()): self.set_task_name(x.task_name())
+    if (x.has_eta_usec()): self.set_eta_usec(x.eta_usec())
+    if (x.has_method()): self.set_method(x.method())
+    if (x.has_url()): self.set_url(x.url())
+    for i in xrange(x.header_size()): self.add_header().CopyFrom(x.header(i))
+    if (x.has_body()): self.set_body(x.body())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_queue_name_ != x.has_queue_name_: return 0
+    if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
+    if self.has_task_name_ != x.has_task_name_: return 0
+    if self.has_task_name_ and self.task_name_ != x.task_name_: return 0
+    if self.has_eta_usec_ != x.has_eta_usec_: return 0
+    if self.has_eta_usec_ and self.eta_usec_ != x.eta_usec_: return 0
+    if self.has_method_ != x.has_method_: return 0
+    if self.has_method_ and self.method_ != x.method_: return 0
+    if self.has_url_ != x.has_url_: return 0
+    if self.has_url_ and self.url_ != x.url_: return 0
+    if len(self.header_) != len(x.header_): return 0
+    for e1, e2 in zip(self.header_, x.header_):
+      if e1 != e2: return 0
+    if self.has_body_ != x.has_body_: return 0
+    if self.has_body_ and self.body_ != x.body_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_queue_name_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: queue_name not set.')
+    if (not self.has_task_name_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: task_name not set.')
+    if (not self.has_eta_usec_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: eta_usec not set.')
+    if (not self.has_url_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: url not set.')
+    for p in self.header_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.queue_name_))
+    n += self.lengthString(len(self.task_name_))
+    n += self.lengthVarInt64(self.eta_usec_)
+    if (self.has_method_): n += 1 + self.lengthVarInt64(self.method_)
+    n += self.lengthString(len(self.url_))
+    n += 2 * len(self.header_)
+    for i in xrange(len(self.header_)): n += self.header_[i].ByteSize()
+    if (self.has_body_): n += 1 + self.lengthString(len(self.body_))
+    return n + 4
+
+  def Clear(self):
+    self.clear_queue_name()
+    self.clear_task_name()
+    self.clear_eta_usec()
+    self.clear_method()
+    self.clear_url()
+    self.clear_header()
+    self.clear_body()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putPrefixedString(self.queue_name_)
+    out.putVarInt32(18)
+    out.putPrefixedString(self.task_name_)
+    out.putVarInt32(24)
+    out.putVarInt64(self.eta_usec_)
+    out.putVarInt32(34)
+    out.putPrefixedString(self.url_)
+    if (self.has_method_):
+      out.putVarInt32(40)
+      out.putVarInt32(self.method_)
+    for i in xrange(len(self.header_)):
+      out.putVarInt32(51)
+      self.header_[i].OutputUnchecked(out)
+      out.putVarInt32(52)
+    if (self.has_body_):
+      out.putVarInt32(74)
+      out.putPrefixedString(self.body_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_queue_name(d.getPrefixedString())
+        continue
+      if tt == 18:
+        self.set_task_name(d.getPrefixedString())
+        continue
+      if tt == 24:
+        self.set_eta_usec(d.getVarInt64())
+        continue
+      if tt == 34:
+        self.set_url(d.getPrefixedString())
+        continue
+      if tt == 40:
+        self.set_method(d.getVarInt32())
+        continue
+      if tt == 51:
+        self.add_header().TryMerge(d)
+        continue
+      if tt == 74:
+        self.set_body(d.getPrefixedString())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
+    if self.has_task_name_: res+=prefix+("task_name: %s\n" % self.DebugFormatString(self.task_name_))
+    if self.has_eta_usec_: res+=prefix+("eta_usec: %s\n" % self.DebugFormatInt64(self.eta_usec_))
+    if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatInt32(self.method_))
+    if self.has_url_: res+=prefix+("url: %s\n" % self.DebugFormatString(self.url_))
+    cnt=0
+    for e in self.header_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("Header%s {\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+"}\n"
+      cnt+=1
+    if self.has_body_: res+=prefix+("body: %s\n" % self.DebugFormatString(self.body_))
+    return res
+
+  kqueue_name = 1
+  ktask_name = 2
+  keta_usec = 3
+  kmethod = 5
+  kurl = 4
+  kHeaderGroup = 6
+  kHeaderkey = 7
+  kHeadervalue = 8
+  kbody = 9
+
+  _TEXT = (
+   "ErrorCode",
+   "queue_name",
+   "task_name",
+   "eta_usec",
+   "url",
+   "method",
+   "Header",
+   "key",
+   "value",
+   "body",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.STARTGROUP,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class TaskQueueAddResponse(ProtocolBuffer.ProtocolMessage):
+  has_chosen_task_name_ = 0
+  chosen_task_name_ = ""
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def chosen_task_name(self): return self.chosen_task_name_
+
+  def set_chosen_task_name(self, x):
+    self.has_chosen_task_name_ = 1
+    self.chosen_task_name_ = x
+
+  def clear_chosen_task_name(self):
+    if self.has_chosen_task_name_:
+      self.has_chosen_task_name_ = 0
+      self.chosen_task_name_ = ""
+
+  def has_chosen_task_name(self): return self.has_chosen_task_name_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_chosen_task_name()): self.set_chosen_task_name(x.chosen_task_name())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_chosen_task_name_ != x.has_chosen_task_name_: return 0
+    if self.has_chosen_task_name_ and self.chosen_task_name_ != x.chosen_task_name_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    if (self.has_chosen_task_name_): n += 1 + self.lengthString(len(self.chosen_task_name_))
+    return n + 0
+
+  def Clear(self):
+    self.clear_chosen_task_name()
+
+  def OutputUnchecked(self, out):
+    if (self.has_chosen_task_name_):
+      out.putVarInt32(10)
+      out.putPrefixedString(self.chosen_task_name_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_chosen_task_name(d.getPrefixedString())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_chosen_task_name_: res+=prefix+("chosen_task_name: %s\n" % self.DebugFormatString(self.chosen_task_name_))
+    return res
+
+  kchosen_task_name = 1
+
+  _TEXT = (
+   "ErrorCode",
+   "chosen_task_name",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class TaskQueueUpdateQueueRequest(ProtocolBuffer.ProtocolMessage):
+  has_app_id_ = 0
+  app_id_ = ""
+  has_queue_name_ = 0
+  queue_name_ = ""
+  has_bucket_refill_per_second_ = 0
+  bucket_refill_per_second_ = 0.0
+  has_bucket_capacity_ = 0
+  bucket_capacity_ = 0
+  has_user_specified_rate_ = 0
+  user_specified_rate_ = ""
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def app_id(self): return self.app_id_
+
+  def set_app_id(self, x):
+    self.has_app_id_ = 1
+    self.app_id_ = x
+
+  def clear_app_id(self):
+    if self.has_app_id_:
+      self.has_app_id_ = 0
+      self.app_id_ = ""
+
+  def has_app_id(self): return self.has_app_id_
+
+  def queue_name(self): return self.queue_name_
+
+  def set_queue_name(self, x):
+    self.has_queue_name_ = 1
+    self.queue_name_ = x
+
+  def clear_queue_name(self):
+    if self.has_queue_name_:
+      self.has_queue_name_ = 0
+      self.queue_name_ = ""
+
+  def has_queue_name(self): return self.has_queue_name_
+
+  def bucket_refill_per_second(self): return self.bucket_refill_per_second_
+
+  def set_bucket_refill_per_second(self, x):
+    self.has_bucket_refill_per_second_ = 1
+    self.bucket_refill_per_second_ = x
+
+  def clear_bucket_refill_per_second(self):
+    if self.has_bucket_refill_per_second_:
+      self.has_bucket_refill_per_second_ = 0
+      self.bucket_refill_per_second_ = 0.0
+
+  def has_bucket_refill_per_second(self): return self.has_bucket_refill_per_second_
+
+  def bucket_capacity(self): return self.bucket_capacity_
+
+  def set_bucket_capacity(self, x):
+    self.has_bucket_capacity_ = 1
+    self.bucket_capacity_ = x
+
+  def clear_bucket_capacity(self):
+    if self.has_bucket_capacity_:
+      self.has_bucket_capacity_ = 0
+      self.bucket_capacity_ = 0
+
+  def has_bucket_capacity(self): return self.has_bucket_capacity_
+
+  def user_specified_rate(self): return self.user_specified_rate_
+
+  def set_user_specified_rate(self, x):
+    self.has_user_specified_rate_ = 1
+    self.user_specified_rate_ = x
+
+  def clear_user_specified_rate(self):
+    if self.has_user_specified_rate_:
+      self.has_user_specified_rate_ = 0
+      self.user_specified_rate_ = ""
+
+  def has_user_specified_rate(self): return self.has_user_specified_rate_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_app_id()): self.set_app_id(x.app_id())
+    if (x.has_queue_name()): self.set_queue_name(x.queue_name())
+    if (x.has_bucket_refill_per_second()): self.set_bucket_refill_per_second(x.bucket_refill_per_second())
+    if (x.has_bucket_capacity()): self.set_bucket_capacity(x.bucket_capacity())
+    if (x.has_user_specified_rate()): self.set_user_specified_rate(x.user_specified_rate())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_app_id_ != x.has_app_id_: return 0
+    if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
+    if self.has_queue_name_ != x.has_queue_name_: return 0
+    if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
+    if self.has_bucket_refill_per_second_ != x.has_bucket_refill_per_second_: return 0
+    if self.has_bucket_refill_per_second_ and self.bucket_refill_per_second_ != x.bucket_refill_per_second_: return 0
+    if self.has_bucket_capacity_ != x.has_bucket_capacity_: return 0
+    if self.has_bucket_capacity_ and self.bucket_capacity_ != x.bucket_capacity_: return 0
+    if self.has_user_specified_rate_ != x.has_user_specified_rate_: return 0
+    if self.has_user_specified_rate_ and self.user_specified_rate_ != x.user_specified_rate_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_app_id_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: app_id not set.')
+    if (not self.has_queue_name_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: queue_name not set.')
+    if (not self.has_bucket_refill_per_second_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: bucket_refill_per_second not set.')
+    if (not self.has_bucket_capacity_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: bucket_capacity not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.app_id_))
+    n += self.lengthString(len(self.queue_name_))
+    n += self.lengthVarInt64(self.bucket_capacity_)
+    if (self.has_user_specified_rate_): n += 1 + self.lengthString(len(self.user_specified_rate_))
+    return n + 12
+
+  def Clear(self):
+    self.clear_app_id()
+    self.clear_queue_name()
+    self.clear_bucket_refill_per_second()
+    self.clear_bucket_capacity()
+    self.clear_user_specified_rate()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putPrefixedString(self.app_id_)
+    out.putVarInt32(18)
+    out.putPrefixedString(self.queue_name_)
+    out.putVarInt32(25)
+    out.putDouble(self.bucket_refill_per_second_)
+    out.putVarInt32(32)
+    out.putVarInt32(self.bucket_capacity_)
+    if (self.has_user_specified_rate_):
+      out.putVarInt32(42)
+      out.putPrefixedString(self.user_specified_rate_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_app_id(d.getPrefixedString())
+        continue
+      if tt == 18:
+        self.set_queue_name(d.getPrefixedString())
+        continue
+      if tt == 25:
+        self.set_bucket_refill_per_second(d.getDouble())
+        continue
+      if tt == 32:
+        self.set_bucket_capacity(d.getVarInt32())
+        continue
+      if tt == 42:
+        self.set_user_specified_rate(d.getPrefixedString())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
+    if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
+    if self.has_bucket_refill_per_second_: res+=prefix+("bucket_refill_per_second: %s\n" % self.DebugFormat(self.bucket_refill_per_second_))
+    if self.has_bucket_capacity_: res+=prefix+("bucket_capacity: %s\n" % self.DebugFormatInt32(self.bucket_capacity_))
+    if self.has_user_specified_rate_: res+=prefix+("user_specified_rate: %s\n" % self.DebugFormatString(self.user_specified_rate_))
+    return res
+
+  kapp_id = 1
+  kqueue_name = 2
+  kbucket_refill_per_second = 3
+  kbucket_capacity = 4
+  kuser_specified_rate = 5
+
+  _TEXT = (
+   "ErrorCode",
+   "app_id",
+   "queue_name",
+   "bucket_refill_per_second",
+   "bucket_capacity",
+   "user_specified_rate",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.DOUBLE,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class TaskQueueUpdateQueueResponse(ProtocolBuffer.ProtocolMessage):
+
+  def __init__(self, contents=None):
+    pass
+    if contents is not None: self.MergeFromString(contents)
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+
+  def Equals(self, x):
+    if x is self: return 1
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    return n + 0
+
+  def Clear(self):
+    pass
+
+  def OutputUnchecked(self, out):
+    pass
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    return res
+
+
+  _TEXT = (
+   "ErrorCode",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class TaskQueueFetchQueuesRequest(ProtocolBuffer.ProtocolMessage):
+  has_app_id_ = 0
+  app_id_ = ""
+  has_max_rows_ = 0
+  max_rows_ = 0
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def app_id(self): return self.app_id_
+
+  def set_app_id(self, x):
+    self.has_app_id_ = 1
+    self.app_id_ = x
+
+  def clear_app_id(self):
+    if self.has_app_id_:
+      self.has_app_id_ = 0
+      self.app_id_ = ""
+
+  def has_app_id(self): return self.has_app_id_
+
+  def max_rows(self): return self.max_rows_
+
+  def set_max_rows(self, x):
+    self.has_max_rows_ = 1
+    self.max_rows_ = x
+
+  def clear_max_rows(self):
+    if self.has_max_rows_:
+      self.has_max_rows_ = 0
+      self.max_rows_ = 0
+
+  def has_max_rows(self): return self.has_max_rows_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_app_id()): self.set_app_id(x.app_id())
+    if (x.has_max_rows()): self.set_max_rows(x.max_rows())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_app_id_ != x.has_app_id_: return 0
+    if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
+    if self.has_max_rows_ != x.has_max_rows_: return 0
+    if self.has_max_rows_ and self.max_rows_ != x.max_rows_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_app_id_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: app_id not set.')
+    if (not self.has_max_rows_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: max_rows not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.app_id_))
+    n += self.lengthVarInt64(self.max_rows_)
+    return n + 2
+
+  def Clear(self):
+    self.clear_app_id()
+    self.clear_max_rows()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putPrefixedString(self.app_id_)
+    out.putVarInt32(16)
+    out.putVarInt32(self.max_rows_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_app_id(d.getPrefixedString())
+        continue
+      if tt == 16:
+        self.set_max_rows(d.getVarInt32())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
+    if self.has_max_rows_: res+=prefix+("max_rows: %s\n" % self.DebugFormatInt32(self.max_rows_))
+    return res
+
+  kapp_id = 1
+  kmax_rows = 2
+
+  _TEXT = (
+   "ErrorCode",
+   "app_id",
+   "max_rows",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class TaskQueueFetchQueuesResponse_Queue(ProtocolBuffer.ProtocolMessage):
+  has_queue_name_ = 0
+  queue_name_ = ""
+  has_bucket_refill_per_second_ = 0
+  bucket_refill_per_second_ = 0.0
+  has_bucket_capacity_ = 0
+  bucket_capacity_ = 0.0
+  has_user_specified_rate_ = 0
+  user_specified_rate_ = ""
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def queue_name(self): return self.queue_name_
+
+  def set_queue_name(self, x):
+    self.has_queue_name_ = 1
+    self.queue_name_ = x
+
+  def clear_queue_name(self):
+    if self.has_queue_name_:
+      self.has_queue_name_ = 0
+      self.queue_name_ = ""
+
+  def has_queue_name(self): return self.has_queue_name_
+
+  def bucket_refill_per_second(self): return self.bucket_refill_per_second_
+
+  def set_bucket_refill_per_second(self, x):
+    self.has_bucket_refill_per_second_ = 1
+    self.bucket_refill_per_second_ = x
+
+  def clear_bucket_refill_per_second(self):
+    if self.has_bucket_refill_per_second_:
+      self.has_bucket_refill_per_second_ = 0
+      self.bucket_refill_per_second_ = 0.0
+
+  def has_bucket_refill_per_second(self): return self.has_bucket_refill_per_second_
+
+  def bucket_capacity(self): return self.bucket_capacity_
+
+  def set_bucket_capacity(self, x):
+    self.has_bucket_capacity_ = 1
+    self.bucket_capacity_ = x
+
+  def clear_bucket_capacity(self):
+    if self.has_bucket_capacity_:
+      self.has_bucket_capacity_ = 0
+      self.bucket_capacity_ = 0.0
+
+  def has_bucket_capacity(self): return self.has_bucket_capacity_
+
+  def user_specified_rate(self): return self.user_specified_rate_
+
+  def set_user_specified_rate(self, x):
+    self.has_user_specified_rate_ = 1
+    self.user_specified_rate_ = x
+
+  def clear_user_specified_rate(self):
+    if self.has_user_specified_rate_:
+      self.has_user_specified_rate_ = 0
+      self.user_specified_rate_ = ""
+
+  def has_user_specified_rate(self): return self.has_user_specified_rate_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_queue_name()): self.set_queue_name(x.queue_name())
+    if (x.has_bucket_refill_per_second()): self.set_bucket_refill_per_second(x.bucket_refill_per_second())
+    if (x.has_bucket_capacity()): self.set_bucket_capacity(x.bucket_capacity())
+    if (x.has_user_specified_rate()): self.set_user_specified_rate(x.user_specified_rate())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_queue_name_ != x.has_queue_name_: return 0
+    if self.has_queue_name_ and self.queue_name_ != x.queue_name_: return 0
+    if self.has_bucket_refill_per_second_ != x.has_bucket_refill_per_second_: return 0
+    if self.has_bucket_refill_per_second_ and self.bucket_refill_per_second_ != x.bucket_refill_per_second_: return 0
+    if self.has_bucket_capacity_ != x.has_bucket_capacity_: return 0
+    if self.has_bucket_capacity_ and self.bucket_capacity_ != x.bucket_capacity_: return 0
+    if self.has_user_specified_rate_ != x.has_user_specified_rate_: return 0
+    if self.has_user_specified_rate_ and self.user_specified_rate_ != x.user_specified_rate_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_queue_name_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: queue_name not set.')
+    if (not self.has_bucket_refill_per_second_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: bucket_refill_per_second not set.')
+    if (not self.has_bucket_capacity_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: bucket_capacity not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.queue_name_))
+    if (self.has_user_specified_rate_): n += 1 + self.lengthString(len(self.user_specified_rate_))
+    return n + 19
+
+  def Clear(self):
+    self.clear_queue_name()
+    self.clear_bucket_refill_per_second()
+    self.clear_bucket_capacity()
+    self.clear_user_specified_rate()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(18)
+    out.putPrefixedString(self.queue_name_)
+    out.putVarInt32(25)
+    out.putDouble(self.bucket_refill_per_second_)
+    out.putVarInt32(33)
+    out.putDouble(self.bucket_capacity_)
+    if (self.has_user_specified_rate_):
+      out.putVarInt32(42)
+      out.putPrefixedString(self.user_specified_rate_)
+
+  def TryMerge(self, d):
+    while 1:
+      tt = d.getVarInt32()
+      if tt == 12: break
+      if tt == 18:
+        self.set_queue_name(d.getPrefixedString())
+        continue
+      if tt == 25:
+        self.set_bucket_refill_per_second(d.getDouble())
+        continue
+      if tt == 33:
+        self.set_bucket_capacity(d.getDouble())
+        continue
+      if tt == 42:
+        self.set_user_specified_rate(d.getPrefixedString())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_queue_name_: res+=prefix+("queue_name: %s\n" % self.DebugFormatString(self.queue_name_))
+    if self.has_bucket_refill_per_second_: res+=prefix+("bucket_refill_per_second: %s\n" % self.DebugFormat(self.bucket_refill_per_second_))
+    if self.has_bucket_capacity_: res+=prefix+("bucket_capacity: %s\n" % self.DebugFormat(self.bucket_capacity_))
+    if self.has_user_specified_rate_: res+=prefix+("user_specified_rate: %s\n" % self.DebugFormatString(self.user_specified_rate_))
+    return res
+
+class TaskQueueFetchQueuesResponse(ProtocolBuffer.ProtocolMessage):
+
+  def __init__(self, contents=None):
+    self.queue_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def queue_size(self): return len(self.queue_)
+  def queue_list(self): return self.queue_
+
+  def queue(self, i):
+    return self.queue_[i]
+
+  def mutable_queue(self, i):
+    return self.queue_[i]
+
+  def add_queue(self):
+    x = TaskQueueFetchQueuesResponse_Queue()
+    self.queue_.append(x)
+    return x
+
+  def clear_queue(self):
+    self.queue_ = []
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.queue_size()): self.add_queue().CopyFrom(x.queue(i))
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.queue_) != len(x.queue_): return 0
+    for e1, e2 in zip(self.queue_, x.queue_):
+      if e1 != e2: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    for p in self.queue_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 2 * len(self.queue_)
+    for i in xrange(len(self.queue_)): n += self.queue_[i].ByteSize()
+    return n + 0
+
+  def Clear(self):
+    self.clear_queue()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.queue_)):
+      out.putVarInt32(11)
+      self.queue_[i].OutputUnchecked(out)
+      out.putVarInt32(12)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 11:
+        self.add_queue().TryMerge(d)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.queue_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("Queue%s {\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+"}\n"
+      cnt+=1
+    return res
+
+  kQueueGroup = 1
+  kQueuequeue_name = 2
+  kQueuebucket_refill_per_second = 3
+  kQueuebucket_capacity = 4
+  kQueueuser_specified_rate = 5
+
+  _TEXT = (
+   "ErrorCode",
+   "Queue",
+   "queue_name",
+   "bucket_refill_per_second",
+   "bucket_capacity",
+   "user_specified_rate",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STARTGROUP,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.DOUBLE,
+
+   ProtocolBuffer.Encoder.DOUBLE,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class TaskQueueFetchQueueStatsRequest(ProtocolBuffer.ProtocolMessage):
+  has_app_id_ = 0
+  app_id_ = ""
+  has_max_num_tasks_ = 0
+  max_num_tasks_ = 0
+
+  def __init__(self, contents=None):
+    self.queue_name_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def app_id(self): return self.app_id_
+
+  def set_app_id(self, x):
+    self.has_app_id_ = 1
+    self.app_id_ = x
+
+  def clear_app_id(self):
+    if self.has_app_id_:
+      self.has_app_id_ = 0
+      self.app_id_ = ""
+
+  def has_app_id(self): return self.has_app_id_
+
+  def queue_name_size(self): return len(self.queue_name_)
+  def queue_name_list(self): return self.queue_name_
+
+  def queue_name(self, i):
+    return self.queue_name_[i]
+
+  def set_queue_name(self, i, x):
+    self.queue_name_[i] = x
+
+  def add_queue_name(self, x):
+    self.queue_name_.append(x)
+
+  def clear_queue_name(self):
+    self.queue_name_ = []
+
+  def max_num_tasks(self): return self.max_num_tasks_
+
+  def set_max_num_tasks(self, x):
+    self.has_max_num_tasks_ = 1
+    self.max_num_tasks_ = x
+
+  def clear_max_num_tasks(self):
+    if self.has_max_num_tasks_:
+      self.has_max_num_tasks_ = 0
+      self.max_num_tasks_ = 0
+
+  def has_max_num_tasks(self): return self.has_max_num_tasks_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_app_id()): self.set_app_id(x.app_id())
+    for i in xrange(x.queue_name_size()): self.add_queue_name(x.queue_name(i))
+    if (x.has_max_num_tasks()): self.set_max_num_tasks(x.max_num_tasks())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_app_id_ != x.has_app_id_: return 0
+    if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
+    if len(self.queue_name_) != len(x.queue_name_): return 0
+    for e1, e2 in zip(self.queue_name_, x.queue_name_):
+      if e1 != e2: return 0
+    if self.has_max_num_tasks_ != x.has_max_num_tasks_: return 0
+    if self.has_max_num_tasks_ and self.max_num_tasks_ != x.max_num_tasks_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_app_id_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: app_id not set.')
+    if (not self.has_max_num_tasks_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: max_num_tasks not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(len(self.app_id_))
+    n += 1 * len(self.queue_name_)
+    for i in xrange(len(self.queue_name_)): n += self.lengthString(len(self.queue_name_[i]))
+    n += self.lengthVarInt64(self.max_num_tasks_)
+    return n + 2
+
+  def Clear(self):
+    self.clear_app_id()
+    self.clear_queue_name()
+    self.clear_max_num_tasks()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putPrefixedString(self.app_id_)
+    for i in xrange(len(self.queue_name_)):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.queue_name_[i])
+    out.putVarInt32(24)
+    out.putVarInt32(self.max_num_tasks_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        self.set_app_id(d.getPrefixedString())
+        continue
+      if tt == 18:
+        self.add_queue_name(d.getPrefixedString())
+        continue
+      if tt == 24:
+        self.set_max_num_tasks(d.getVarInt32())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
+    cnt=0
+    for e in self.queue_name_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("queue_name%s: %s\n" % (elm, self.DebugFormatString(e)))
+      cnt+=1
+    if self.has_max_num_tasks_: res+=prefix+("max_num_tasks: %s\n" % self.DebugFormatInt32(self.max_num_tasks_))
+    return res
+
+  kapp_id = 1
+  kqueue_name = 2
+  kmax_num_tasks = 3
+
+  _TEXT = (
+   "ErrorCode",
+   "app_id",
+   "queue_name",
+   "max_num_tasks",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class TaskQueueFetchQueueStatsResponse_QueueStats(ProtocolBuffer.ProtocolMessage):
+  has_num_tasks_ = 0
+  num_tasks_ = 0
+  has_oldest_eta_usec_ = 0
+  oldest_eta_usec_ = 0
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def num_tasks(self): return self.num_tasks_
+
+  def set_num_tasks(self, x):
+    self.has_num_tasks_ = 1
+    self.num_tasks_ = x
+
+  def clear_num_tasks(self):
+    if self.has_num_tasks_:
+      self.has_num_tasks_ = 0
+      self.num_tasks_ = 0
+
+  def has_num_tasks(self): return self.has_num_tasks_
+
+  def oldest_eta_usec(self): return self.oldest_eta_usec_
+
+  def set_oldest_eta_usec(self, x):
+    self.has_oldest_eta_usec_ = 1
+    self.oldest_eta_usec_ = x
+
+  def clear_oldest_eta_usec(self):
+    if self.has_oldest_eta_usec_:
+      self.has_oldest_eta_usec_ = 0
+      self.oldest_eta_usec_ = 0
+
+  def has_oldest_eta_usec(self): return self.has_oldest_eta_usec_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_num_tasks()): self.set_num_tasks(x.num_tasks())
+    if (x.has_oldest_eta_usec()): self.set_oldest_eta_usec(x.oldest_eta_usec())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_num_tasks_ != x.has_num_tasks_: return 0
+    if self.has_num_tasks_ and self.num_tasks_ != x.num_tasks_: return 0
+    if self.has_oldest_eta_usec_ != x.has_oldest_eta_usec_: return 0
+    if self.has_oldest_eta_usec_ and self.oldest_eta_usec_ != x.oldest_eta_usec_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_num_tasks_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: num_tasks not set.')
+    if (not self.has_oldest_eta_usec_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: oldest_eta_usec not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthVarInt64(self.num_tasks_)
+    n += self.lengthVarInt64(self.oldest_eta_usec_)
+    return n + 2
+
+  def Clear(self):
+    self.clear_num_tasks()
+    self.clear_oldest_eta_usec()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(16)
+    out.putVarInt32(self.num_tasks_)
+    out.putVarInt32(24)
+    out.putVarInt64(self.oldest_eta_usec_)
+
+  def TryMerge(self, d):
+    while 1:
+      tt = d.getVarInt32()
+      if tt == 12: break
+      if tt == 16:
+        self.set_num_tasks(d.getVarInt32())
+        continue
+      if tt == 24:
+        self.set_oldest_eta_usec(d.getVarInt64())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_num_tasks_: res+=prefix+("num_tasks: %s\n" % self.DebugFormatInt32(self.num_tasks_))
+    if self.has_oldest_eta_usec_: res+=prefix+("oldest_eta_usec: %s\n" % self.DebugFormatInt64(self.oldest_eta_usec_))
+    return res
+
+class TaskQueueFetchQueueStatsResponse(ProtocolBuffer.ProtocolMessage):
+
+  def __init__(self, contents=None):
+    self.queuestats_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def queuestats_size(self): return len(self.queuestats_)
+  def queuestats_list(self): return self.queuestats_
+
+  def queuestats(self, i):
+    return self.queuestats_[i]
+
+  def mutable_queuestats(self, i):
+    return self.queuestats_[i]
+
+  def add_queuestats(self):
+    x = TaskQueueFetchQueueStatsResponse_QueueStats()
+    self.queuestats_.append(x)
+    return x
+
+  def clear_queuestats(self):
+    self.queuestats_ = []
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.queuestats_size()): self.add_queuestats().CopyFrom(x.queuestats(i))
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.queuestats_) != len(x.queuestats_): return 0
+    for e1, e2 in zip(self.queuestats_, x.queuestats_):
+      if e1 != e2: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    for p in self.queuestats_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 2 * len(self.queuestats_)
+    for i in xrange(len(self.queuestats_)): n += self.queuestats_[i].ByteSize()
+    return n + 0
+
+  def Clear(self):
+    self.clear_queuestats()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.queuestats_)):
+      out.putVarInt32(11)
+      self.queuestats_[i].OutputUnchecked(out)
+      out.putVarInt32(12)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 11:
+        self.add_queuestats().TryMerge(d)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.queuestats_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("QueueStats%s {\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+"}\n"
+      cnt+=1
+    return res
+
+  kQueueStatsGroup = 1
+  kQueueStatsnum_tasks = 2
+  kQueueStatsoldest_eta_usec = 3
+
+  _TEXT = (
+   "ErrorCode",
+   "QueueStats",
+   "num_tasks",
+   "oldest_eta_usec",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STARTGROUP,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+
+__all__ = ['TaskQueueServiceError','TaskQueueAddRequest','TaskQueueAddRequest_Header','TaskQueueAddResponse','TaskQueueUpdateQueueRequest','TaskQueueUpdateQueueResponse','TaskQueueFetchQueuesRequest','TaskQueueFetchQueuesResponse','TaskQueueFetchQueuesResponse_Queue','TaskQueueFetchQueueStatsRequest','TaskQueueFetchQueueStatsResponse','TaskQueueFetchQueueStatsResponse_QueueStats']
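
The generated classes above expose the old-style ProtocolBuffer API: set_*/has_*/clear_* accessors, Equals, and Encode/MergeFromString for the wire format. A minimal usage sketch, assuming the SDK is on the Python path (the queue, task, and URL names are illustrative placeholders):

    from google.appengine.api.labs.taskqueue import taskqueue_service_pb

    # Build an Add request; fields 1-4 are required, and method defaults
    # to POST (method_ = 2 above).
    req = taskqueue_service_pb.TaskQueueAddRequest()
    req.set_queue_name('default')
    req.set_task_name('task1')
    req.set_eta_usec(0)
    req.set_url('/work')
    header = req.add_header()          # repeated group, field 6
    header.set_key('Content-Type')
    header.set_value('text/plain')

    assert req.IsInitialized()         # all required fields are set
    encoded = req.Encode()             # serialize to the wire format

    # The constructor merges a serialized payload via MergeFromString.
    decoded = taskqueue_service_pb.TaskQueueAddRequest(encoded)
    assert decoded.Equals(req)
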
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/labs/taskqueue/taskqueue_stub.py	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,251 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Stub version of the Task Queue API.
+
+This stub only stores tasks; it doesn't actually run them. It also validates
+the tasks by checking their queue name against the queue.yaml.
+
+As well as implementing Task Queue API functions, the stub exposes various other
+functions that are used by the dev_appserver's admin console to display the
+application's queues and tasks.
+"""
+
+
+
+import base64
+import datetime
+import os
+
+import taskqueue_service_pb
+
+from google.appengine.api import apiproxy_stub
+from google.appengine.api import queueinfo
+from google.appengine.api import urlfetch
+from google.appengine.runtime import apiproxy_errors
+
+
+DEFAULT_RATE = '5.00/s'
+
+DEFAULT_BUCKET_SIZE = 5
+
+
+def _ParseQueueYaml(unused_self, root_path):
+  """Load the queue.yaml file and parse it."""
+  if root_path is None:
+    return None
+  for queueyaml in ('queue.yaml', 'queue.yml'):
+    try:
+      fh = open(os.path.join(root_path, queueyaml), 'r')
+    except IOError:
+      continue
+    try:
+      queue_info = queueinfo.LoadSingleQueue(fh)
+      return queue_info
+    finally:
+      fh.close()
+  return None
+
+
+def _CompareEta(a, b):
+  """Python sort comparator for task ETAs."""
+  if a.eta_usec() > b.eta_usec():
+    return 1
+  if a.eta_usec() < b.eta_usec():
+    return -1
+  return 0
+
+
+def _FormatEta(eta_usec):
+  """Formats a task ETA as a date string in UTC."""
+  eta = datetime.datetime.utcfromtimestamp(eta_usec/1000000)
+  return eta.strftime('%Y/%m/%d %H:%M:%S')
+
+
+def _EtaDelta(eta_usec):
+  """Formats a task ETA as a relative time string."""
+  eta = datetime.datetime.utcfromtimestamp(eta_usec/1000000)
+  now = datetime.datetime.utcnow()
+  if eta > now:
+    return str(eta - now) + ' from now'
+  else:
+    return str(now - eta) + ' ago'
+
+
+class TaskQueueServiceStub(apiproxy_stub.APIProxyStub):
+  """Python only task queue service stub.
+
+  This stub does not attempt to automatically execute tasks.  Instead, it
+  stores them for display on a console.  The user may manually execute the
+  tasks from the console.
+  """
+
+  queue_yaml_parser = _ParseQueueYaml
+
+  def __init__(self, service_name='taskqueue', root_path=None):
+    """Constructor.
+
+    Args:
+      service_name: Service name expected for all calls.
+      root_path: Root path to the directory of the application which may contain
+        a queue.yaml file. If None, then it's assumed no queue.yaml file is
+        available.
+    """
+    super(TaskQueueServiceStub, self).__init__(service_name)
+    self.taskqueues = {}
+    self.next_task_id = 1
+    self.root_path = root_path
+
+  def _Dynamic_Add(self, request, unused_response):
+    if not self._ValidQueue(request.queue_name()):
+      raise apiproxy_errors.ApplicationError(
+          taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
+
+    if not request.task_name():
+      request.set_task_name('task%d' % self.next_task_id)
+      self.next_task_id += 1
+
+    tasks = self.taskqueues.setdefault(request.queue_name(), [])
+    tasks.append(request)
+    tasks.sort(_CompareEta)
+
+  def _ValidQueue(self, queue_name):
+    if queue_name == 'default':
+      return True
+    queue_info = self.queue_yaml_parser(self.root_path)
+    if queue_info and queue_info.queue:
+      for entry in queue_info.queue:
+        if entry.name == queue_name:
+          return True
+    return False
+
+  def GetQueues(self):
+    """Gets all the applications's queues.
+
+    Returns:
+      A list of dictionaries, where each dictionary contains one queue's
+      attributes.
+    """
+    queues = []
+    queue_info = self.queue_yaml_parser(self.root_path)
+    has_default = False
+    if queue_info and queue_info.queue:
+      for entry in queue_info.queue:
+        if entry.name == 'default':
+          has_default = True
+        queue = {}
+        queues.append(queue)
+        queue['name'] = entry.name
+        queue['max_rate'] = entry.rate
+        if entry.bucket_size:
+          queue['bucket_size'] = entry.bucket_size
+        else:
+          queue['bucket_size'] = DEFAULT_BUCKET_SIZE
+
+        tasks = self.taskqueues.setdefault(entry.name, [])
+        if tasks:
+          queue['oldest_task'] = _FormatEta(tasks[0].eta_usec())
+          queue['eta_delta'] = _EtaDelta(tasks[0].eta_usec())
+        else:
+          queue['oldest_task'] = ''
+        queue['tasks_in_queue'] = len(tasks)
+
+    if not has_default:
+      queue = {}
+      queues.append(queue)
+      queue['name'] = 'default'
+      queue['max_rate'] = DEFAULT_RATE
+      queue['bucket_size'] = DEFAULT_BUCKET_SIZE
+
+      tasks = self.taskqueues.get('default', [])
+      if tasks:
+        queue['oldest_task'] = _FormatEta(tasks[0].eta_usec())
+        queue['eta_delta'] = _EtaDelta(tasks[0].eta_usec())
+      else:
+        queue['oldest_task'] = ''
+      queue['tasks_in_queue'] = len(tasks)
+    return queues
+
+  def GetTasks(self, queue_name):
+    """Gets a queue's tasks.
+
+    Args:
+      queue_name: the name of the queue to return tasks for.
+
+    Returns:
+      A list of dictionaries, where each dictionary contains one task's
+      attributes.
+    """
+    tasks = self.taskqueues.get(queue_name, [])
+    result_tasks = []
+    for task_request in tasks:
+      task = {}
+      result_tasks.append(task)
+      task['name'] = task_request.task_name()
+      task['url'] = task_request.url()
+      method = task_request.method()
+      if (method == taskqueue_service_pb.TaskQueueAddRequest.GET):
+        task['method'] = 'GET'
+      elif (method == taskqueue_service_pb.TaskQueueAddRequest.POST):
+        task['method'] = 'POST'
+      elif (method == taskqueue_service_pb.TaskQueueAddRequest.HEAD):
+        task['method'] = 'HEAD'
+      elif (method == taskqueue_service_pb.TaskQueueAddRequest.PUT):
+        task['method'] = 'PUT'
+      elif (method == taskqueue_service_pb.TaskQueueAddRequest.DELETE):
+        task['method'] = 'DELETE'
+
+      task['eta'] = _FormatEta(task_request.eta_usec())
+      task['eta_delta'] = _EtaDelta(task_request.eta_usec())
+      task['body'] = base64.b64encode(task_request.body())
+      headers = urlfetch._CaselessDict()
+      task['headers'] = headers
+      for req_header in task_request.header_list():
+        headers[req_header.key()] = req_header.value()
+
+      headers['X-AppEngine-QueueName'] = queue_name
+      headers['X-AppEngine-TaskName'] = task['name']
+      headers['X-AppEngine-TaskRetryCount'] = '0'
+      headers['X-AppEngine-Development-Payload'] = '1'
+      headers['Content-Length'] = len(task['body'])
+      headers['Content-Type'] = headers.get(
+          'Content-Type', 'application/octet-stream')
+
+    return result_tasks
+
+  def DeleteTask(self, queue_name, task_name):
+    """Deletes a task from a queue.
+
+    Args:
+      queue_name: the name of the queue to delete the task from.
+      task_name: the name of the task to delete.
+    """
+    tasks = self.taskqueues.get(queue_name, [])
+    for task in tasks:
+      if task.task_name() == task_name:
+        tasks.remove(task)
+        return
+
+  def FlushQueue(self, queue_name):
+    """Removes all tasks from a queue.
+
+    Args:
+      queue_name: the name of the queue to remove tasks from.
+    """
+    self.taskqueues[queue_name] = []
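
The stub above only stores and sorts tasks, so it can be exercised directly in a test or interactive session. A minimal sketch, assuming the SDK is on the Python path ('/work' is a placeholder handler path):

    from google.appengine.api import apiproxy_stub_map
    from google.appengine.api.labs.taskqueue import taskqueue_service_pb
    from google.appengine.api.labs.taskqueue import taskqueue_stub

    apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
    stub = taskqueue_stub.TaskQueueServiceStub()  # no queue.yaml: 'default' only
    apiproxy_stub_map.apiproxy.RegisterStub('taskqueue', stub)

    request = taskqueue_service_pb.TaskQueueAddRequest()
    request.set_queue_name('default')
    request.set_task_name('')   # required field; empty name triggers 'task1'
    request.set_eta_usec(0)
    request.set_url('/work')
    response = taskqueue_service_pb.TaskQueueAddResponse()
    apiproxy_stub_map.MakeSyncCall('taskqueue', 'Add', request, response)

    for task in stub.GetTasks('default'):
      print task['name'], task['method'], task['url'], task['eta_delta']
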
--- a/thirdparty/google_appengine/google/appengine/api/memcache/__init__.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/api/memcache/__init__.py	Fri Jun 19 16:13:32 2009 +0200
@@ -28,6 +28,7 @@
 import math
 import pickle
 import types
+import sha
 
 from google.appengine.api import api_base_pb
 from google.appengine.api import apiproxy_stub_map
@@ -91,14 +92,14 @@
       (which does not have the prefix).
 
   Returns:
-    The key as a non-unicode string prepended with key_prefix. This is the key
-    sent to and stored by the server.
+    The key as a non-unicode string prepended with key_prefix. This is
+    the key sent to and stored by the server. If the resulting key is
+    longer than MAX_KEY_SIZE, it is hashed with SHA-1 and replaced with
+    the hex digest of that hash.
 
   Raises:
     TypeError: If provided key isn't a string or tuple of (int, string)
       or key_prefix or server_to_user_dict are of the wrong type.
-    ValueError: If the key, when translated to the server key, is more than
-      250 bytes in length.
   """
   if type(key) is types.TupleType:
     key = key[1]
@@ -113,8 +114,7 @@
     server_key = server_key.encode('utf-8')
 
   if len(server_key) > MAX_KEY_SIZE:
-    raise ValueError('Keys may not be more than %d bytes in length, '
-                     'received %d bytes' % (MAX_KEY_SIZE, len(server_key)))
+    server_key = sha.new(server_key).hexdigest()
 
   if server_to_user_dict is not None:
     if not isinstance(server_to_user_dict, dict):
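
The net effect of this change: keys that exceed the 250-byte memcache limit are now silently mapped to their SHA-1 hex digest instead of raising ValueError. A simplified sketch of just that branch (the real _key_string also handles (int, string) tuple keys and prefix mapping):

    import sha  # the module this change newly imports

    MAX_KEY_SIZE = 250  # memcache's per-key limit, as defined in the module

    def server_key_for(key, key_prefix=''):
      server_key = key_prefix + key
      if isinstance(server_key, unicode):
        server_key = server_key.encode('utf-8')
      if len(server_key) > MAX_KEY_SIZE:
        # Previously a ValueError; now a transparent 40-char hex digest.
        server_key = sha.new(server_key).hexdigest()
      return server_key

    assert len(server_key_for('x' * 300)) == 40
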
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/queueinfo.py	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""QueueInfo tools.
+
+A library for working with QueueInfo records, describing task queue entries
+for an application. Supports loading the records from queue.yaml.
+
+A queue has two required parameters and one optional one. The required
+parameters are 'name' (must be unique for an appid) and 'rate' (the
+rate at which jobs in the queue are run). There is also an optional
+'bucket_size' parameter, which allows unused tokens to be 'saved up' for
+short bursts. Rate and bucket_size are expressed as number/unit, with
+number being an int or a float, and unit being one of 's' (seconds),
+'m' (minutes), 'h' (hours) or 'd' (days).
+
+An example of the use of bucket_size: the free email quota is 2000/d, and
+the maximum you can send in a single minute is 11. So we can define a
+queue for sending email like this:
+
+queue:
+- name: mail_queue
+  rate: 2000/d
+  bucket_size: 10/m
+
+If this queue had been idle for a while before some jobs were submitted to it,
+the first 10 jobs submitted would be run immediately, then subsequent ones
+would be run once every 40s or so. The limit of 2000 per day would still apply.
+"""
+
+
+
+from google.appengine.api import validation
+from google.appengine.api import yaml_builder
+from google.appengine.api import yaml_listener
+from google.appengine.api import yaml_object
+
+_NAME_REGEX = r'^[A-Za-z0-9-]{0,499}$'
+_RATE_REGEX = r'^[0-9]+(\.[0-9]+)?/[smhd]'
+
+QUEUE = 'queue'
+
+NAME = 'name'
+RATE = 'rate'
+BUCKET_SIZE = 'bucket_size'
+
+
+class MalformedQueueConfiguration(Exception):
+  """Configuration file for Task Queue is malformed."""
+
+
+class QueueEntry(validation.Validated):
+  """A queue entry describes a single task queue."""
+  ATTRIBUTES = {
+      NAME: _NAME_REGEX,
+      RATE: _RATE_REGEX,
+      BUCKET_SIZE: validation.Optional(validation.TYPE_INT),
+  }
+
+
+class QueueInfoExternal(validation.Validated):
+  """QueueInfoExternal describes all queue entries for an application."""
+  ATTRIBUTES = {
+      QUEUE: validation.Optional(validation.Repeated(QueueEntry))
+  }
+
+
+def LoadSingleQueue(queue_info):
+  """Load a queue.yaml file or string and return a QueueInfoExternal object.
+
+  Args:
+    queue_info: the contents of a queue.yaml file, as a string or an open
+      file object.
+
+  Returns:
+    A QueueInfoExternal object.
+  """
+  builder = yaml_object.ObjectBuilder(QueueInfoExternal)
+  handler = yaml_builder.BuilderHandler(builder)
+  listener = yaml_listener.EventListener(handler)
+  listener.Parse(queue_info)
+
+  queue_info = handler.GetResults()
+  if len(queue_info) < 1:
+    raise MalformedQueueConfiguration('Empty queue configuration.')
+  if len(queue_info) > 1:
+    raise MalformedQueueConfiguration('Multiple queue: sections '
+                                      'in configuration.')
+  return queue_info[0]
+
+
+def ParseRate(rate):
+  """Parses a rate string in the form number/unit.
+
+  The unit is one of s (seconds), m (minutes), h (hours) or d (days).
+
+  Args:
+    rate: the rate string.
+
+  Returns:
+    a floating point number representing the rate/second.
+
+  Raises:
+    MalformedQueueConfiguration: if the rate is invalid
+  """
+  elements = rate.split('/')
+  if len(elements) != 2:
+    raise MalformedQueueConfiguration('Rate "%s" is invalid.' % rate)
+  number, unit = elements
+  try:
+    number = float(number)
+  except ValueError:
+    raise MalformedQueueConfiguration('Rate "%s" is invalid:'
+                                          ' "%s" is not a number.' %
+                                          (rate, number))
+  if unit not in 'smhd':
+    raise MalformedQueueConfiguration('Rate "%s" is invalid:'
+                                          ' "%s" is not one of s, m, h, d.' %
+                                          (rate, unit))
+  if unit == 's':
+    return number
+  if unit == 'm':
+    return number/60
+  if unit == 'h':
+    return number/(60 * 60)
+  if unit == 'd':
+    return number/(24 * 60 * 60)
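
A quick worked example of ParseRate, following the conversions above (number is coerced to float before dividing, so the results are floats):

    from google.appengine.api import queueinfo

    queueinfo.ParseRate('5.00/s')   # -> 5.0 tasks per second
    queueinfo.ParseRate('10/m')     # -> 10/60, about 0.167 per second
    queueinfo.ParseRate('2000/d')   # -> 2000/86400, about 0.023 per second
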
--- a/thirdparty/google_appengine/google/appengine/api/urlfetch.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/api/urlfetch.py	Fri Jun 19 16:13:32 2009 +0200
@@ -30,7 +30,6 @@
 import urllib2
 import urlparse
 
-from google.appengine.api import apiproxy_rpc
 from google.appengine.api import apiproxy_stub_map
 from google.appengine.api import urlfetch_service_pb
 from google.appengine.api.urlfetch_errors import *
@@ -187,23 +186,23 @@
   return False
 
 
-def __create_rpc(deadline=None, callback=None):
-  """DO NOT USE.  WILL CHANGE AND BREAK YOUR CODE.
-
-  Creates an RPC object for use with the urlfetch API.
+def create_rpc(deadline=None, callback=None):
+  """Creates an RPC object for use with the urlfetch API.
 
   Args:
-    deadline: deadline in seconds for the operation.
-    callback: callable to invoke on completion.
+    deadline: Optional deadline in seconds for the operation; the default
+      is a system-specific deadline (typically 5 seconds).
+    callback: Optional callable to invoke on completion.
 
   Returns:
-    A _URLFetchRPC object.
+    An apiproxy_stub_map.UserRPC object specialized for this service.
   """
-  return _URLFetchRPC(deadline, callback)
+  return apiproxy_stub_map.UserRPC('urlfetch', deadline, callback)
 
 
-def fetch(url, payload=None, method=GET, headers={}, allow_truncated=False,
-          follow_redirects=True, deadline=None):
+def fetch(url, payload=None, method=GET, headers={},
+          allow_truncated=False, follow_redirects=True,
+          deadline=None):
   """Fetches the given HTTP URL, blocking until the result is returned.
 
   Other optional parameters are:
@@ -212,7 +211,7 @@
        this is ignored if the method is not POST or PUT.
      headers: dictionary of HTTP headers to send with the request
      allow_truncated: if true, truncate large responses and return them without
-       error. otherwise, ResponseTooLargeError will be thrown when a response is
+       error. Otherwise, ResponseTooLargeError is raised when a response is
        truncated.
      follow_redirects: if true (the default), redirects are
        transparently followed and the response (if less than 5
@@ -236,158 +235,108 @@
   of the returned structure, so HTTP errors like 404 do not result in an
   exception.
   """
-  rpc = __create_rpc(deadline=deadline)
-  rpc.make_call(url, payload, method, headers, follow_redirects)
-  return rpc.get_result(allow_truncated)
+  rpc = create_rpc(deadline=deadline)
+  make_fetch_call(rpc, url, payload, method, headers,
+                  allow_truncated, follow_redirects)
+  return rpc.get_result()
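
With fetch now layered on create_rpc and make_fetch_call, a caller can overlap a fetch with other work. A minimal sketch of the asynchronous pattern (the URL is a placeholder):

    from google.appengine.api import urlfetch

    rpc = urlfetch.create_rpc(deadline=10)
    urlfetch.make_fetch_call(rpc, 'http://example.com/')

    # ... do other work while the fetch is in flight ...

    result = rpc.get_result()   # waits, runs post-call hooks, raises on error
    if result.status_code == 200:
      print result.content[:80]
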
 
 
-class _URLFetchRPC(object):
-  """A RPC object that manages the urlfetch RPC.
-
-  Its primary functions are the following:
-  1. Convert error codes to the URLFetchServiceError namespace and raise them
-     when get_result is called.
-  2. Wrap the urlfetch response with a _URLFetchResult object.
-  """
-
-  def __init__(self, deadline=None, callback=None):
-    """Construct a new url fetch RPC.
+def make_fetch_call(rpc, url, payload=None, method=GET, headers={},
+                    allow_truncated=False, follow_redirects=True):
+  """Executes the RPC call to fetch a given HTTP URL.
 
-    Args:
-      deadline: deadline in seconds for the operation.
-      callback: callable to invoke on completion.
-    """
-    self.__rpc = apiproxy_stub_map.CreateRPC('urlfetch')
-    self.__rpc.deadline = deadline
-    self.__rpc.callback = callback
-    self.__called_hooks = False
+  The first argument is a UserRPC instance.  See urlfetch.fetch for a
+  thorough description of the remaining arguments.
+  """
+  assert rpc.service == 'urlfetch', repr(rpc.service)
+  if isinstance(method, basestring):
+    method = method.upper()
+  method = _URL_STRING_MAP.get(method, method)
+  if method not in _VALID_METHODS:
+    raise InvalidMethodError('Invalid method %s.' % str(method))
 
-  def make_call(self, url, payload=None, method=GET, headers={},
-                follow_redirects=True):
-    """Executes the RPC call to fetch a given HTTP URL.
+  if _is_fetching_self(url, method):
+    raise InvalidURLError("App cannot fetch the same URL as the one used for "
+                          "the request.")
 
-    See urlfetch.fetch for a thorough description of arguments.
-    """
-    assert self.__rpc.state is apiproxy_rpc.RPC.IDLE
-    if isinstance(method, basestring):
-      method = method.upper()
-    method = _URL_STRING_MAP.get(method, method)
-    if method not in _VALID_METHODS:
-      raise InvalidMethodError('Invalid method %s.' % str(method))
+  request = urlfetch_service_pb.URLFetchRequest()
+  response = urlfetch_service_pb.URLFetchResponse()
+  request.set_url(url)
 
-    if _is_fetching_self(url, method):
-      raise InvalidURLError("App cannot fetch the same URL as the one used for "
-                            "the request.")
-
-    self.__request = urlfetch_service_pb.URLFetchRequest()
-    self.__response = urlfetch_service_pb.URLFetchResponse()
-    self.__result = None
-    self.__request.set_url(url)
+  if method == GET:
+    request.set_method(urlfetch_service_pb.URLFetchRequest.GET)
+  elif method == POST:
+    request.set_method(urlfetch_service_pb.URLFetchRequest.POST)
+  elif method == HEAD:
+    request.set_method(urlfetch_service_pb.URLFetchRequest.HEAD)
+  elif method == PUT:
+    request.set_method(urlfetch_service_pb.URLFetchRequest.PUT)
+  elif method == DELETE:
+    request.set_method(urlfetch_service_pb.URLFetchRequest.DELETE)
 
-    if method == GET:
-      self.__request.set_method(urlfetch_service_pb.URLFetchRequest.GET)
-    elif method == POST:
-      self.__request.set_method(urlfetch_service_pb.URLFetchRequest.POST)
-    elif method == HEAD:
-      self.__request.set_method(urlfetch_service_pb.URLFetchRequest.HEAD)
-    elif method == PUT:
-      self.__request.set_method(urlfetch_service_pb.URLFetchRequest.PUT)
-    elif method == DELETE:
-      self.__request.set_method(urlfetch_service_pb.URLFetchRequest.DELETE)
+  if payload and (method == POST or method == PUT):
+    request.set_payload(payload)
 
-    if payload and (method == POST or method == PUT):
-      self.__request.set_payload(payload)
-
-    for key, value in headers.iteritems():
-      header_proto = self.__request.add_header()
-      header_proto.set_key(key)
-      header_proto.set_value(str(value))
+  for key, value in headers.iteritems():
+    header_proto = request.add_header()
+    header_proto.set_key(key)
+    header_proto.set_value(str(value))
 
-    self.__request.set_followredirects(follow_redirects)
-    if self.__rpc.deadline:
-      self.__request.set_deadline(self.__rpc.deadline)
+  request.set_followredirects(follow_redirects)
 
-    apiproxy_stub_map.apiproxy.GetPreCallHooks().Call(
-        'urlfetch', 'Fetch', self.__request, self.__response)
-    self.__rpc.MakeCall('urlfetch', 'Fetch', self.__request, self.__response)
+  if rpc.deadline is not None:
+    request.set_deadline(rpc.deadline)
+
+  rpc.make_call('Fetch', request, response, _get_fetch_result, allow_truncated)
 
-  def wait(self):
-    """Waits for the urlfetch RPC to finish.  Idempotent.
-    """
-    assert self.__rpc.state is not apiproxy_rpc.RPC.IDLE
-    if self.__rpc.state is apiproxy_rpc.RPC.RUNNING:
-      self.__rpc.Wait()
 
-  def check_success(self, allow_truncated=False):
-    """Check success and convert RPC exceptions to urlfetch exceptions.
+def _get_fetch_result(rpc):
+  """Check success, handle exceptions, and return converted RPC result.
 
-    This method waits for the RPC if it has not yet finished, and calls the
-    post-call hooks on the first invocation.
+  This function waits for the RPC if it has not yet finished, and calls the
+  post-call hooks on the first invocation.
 
-    Args:
-      allow_truncated: if False, an error is raised if the response was
-        truncated.
+  Args:
+    rpc: A UserRPC object.
 
-    Raises:
-      InvalidURLError if the url was invalid.
-      DownloadError if there was a problem fetching the url.
-      ResponseTooLargeError if the response was either truncated (and
-        allow_truncated is false) or if it was too big for us to download.
-    """
-    assert self.__rpc.state is not apiproxy_rpc.RPC.IDLE
-    if self.__rpc.state is apiproxy_rpc.RPC.RUNNING:
-      self.wait()
+  Raises:
+    InvalidURLError if the url was invalid.
+    DownloadError if there was a problem fetching the url.
+    ResponseTooLargeError if the response was either truncated (and
+      allow_truncated=False was passed to make_fetch_call()), or if it
+      was too big for us to download.
 
-    try:
-      self.__rpc.CheckSuccess()
-      if not self.__called_hooks:
-        self.__called_hooks = True
-        apiproxy_stub_map.apiproxy.GetPostCallHooks().Call(
-            'urlfetch', 'Fetch', self.__request, self.__response)
-    except apiproxy_errors.ApplicationError, e:
-      if (e.application_error ==
-          urlfetch_service_pb.URLFetchServiceError.INVALID_URL):
-        raise InvalidURLError(str(e))
-      if (e.application_error ==
-          urlfetch_service_pb.URLFetchServiceError.UNSPECIFIED_ERROR):
-        raise DownloadError(str(e))
-      if (e.application_error ==
-          urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR):
-        raise DownloadError(str(e))
-      if (e.application_error ==
-          urlfetch_service_pb.URLFetchServiceError.RESPONSE_TOO_LARGE):
-        raise ResponseTooLargeError(None)
-      if (e.application_error ==
-          urlfetch_service_pb.URLFetchServiceError.DEADLINE_EXCEEDED):
-        raise DownloadError(str(e))
-      raise e
+  Returns:
+    A _URLFetchResult object.
+  """
+  assert rpc.service == 'urlfetch', repr(rpc.service)
+  assert rpc.method == 'Fetch', repr(rpc.method)
+  try:
+    rpc.check_success()
+  except apiproxy_errors.ApplicationError, err:
+    if (err.application_error ==
+        urlfetch_service_pb.URLFetchServiceError.INVALID_URL):
+      raise InvalidURLError(str(err))
+    if (err.application_error ==
+        urlfetch_service_pb.URLFetchServiceError.UNSPECIFIED_ERROR):
+      raise DownloadError(str(err))
+    if (err.application_error ==
+        urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR):
+      raise DownloadError(str(err))
+    if (err.application_error ==
+        urlfetch_service_pb.URLFetchServiceError.RESPONSE_TOO_LARGE):
+      raise ResponseTooLargeError(None)
+    if (err.application_error ==
+        urlfetch_service_pb.URLFetchServiceError.DEADLINE_EXCEEDED):
+      raise DownloadError(str(err))
+    raise err
 
-    if self.__response.contentwastruncated() and not allow_truncated:
-      raise ResponseTooLargeError(_URLFetchResult(self.__response))
-
-  def get_result(self, allow_truncated=False):
-    """Returns the RPC result or raises an exception if the rpc failed.
-
-    This method waits for the RPC if not completed, and checks success.
-
-    Args:
-      allow_truncated: if False, an error is raised if the response was
-        truncated.
-
-    Returns:
-      The urlfetch result.
-
-    Raises:
-      Error if the rpc has not yet finished.
-      InvalidURLError if the url was invalid.
-      DownloadError if there was a problem fetching the url.
-      ResponseTooLargeError if the response was either truncated (and
-        allow_truncated is false) or if it was too big for us to download.
-    """
-    if self.__result is None:
-      self.check_success(allow_truncated)
-      self.__result = _URLFetchResult(self.__response)
-    return self.__result
+  response = rpc.response
+  allow_truncated = rpc.user_data
+  result = _URLFetchResult(response)
+  if response.contentwastruncated() and not allow_truncated:
+    raise ResponseTooLargeError(result)
+  return result
 
 
 Fetch = fetch
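
The rewrite above replaces the private _URLFetchRPC class with the generic
apiproxy_stub_map.UserRPC, with _get_fetch_result installed as the result
hook. A minimal sketch of the asynchronous pattern this enables (hypothetical
URLs; assumes the App Engine runtime is importable):

    from google.appengine.api import urlfetch

    # Start two fetches without blocking.
    rpc_a = urlfetch.create_rpc(deadline=10)
    rpc_b = urlfetch.create_rpc(deadline=10)
    urlfetch.make_fetch_call(rpc_a, 'http://example.com/a')
    urlfetch.make_fetch_call(rpc_b, 'http://example.com/b')

    # Do unrelated work here, then block on each result; get_result()
    # runs _get_fetch_result, which maps RPC errors to urlfetch errors.
    for rpc in (rpc_a, rpc_b):
      try:
        print rpc.get_result().status_code
      except urlfetch.DownloadError:
        print 'fetch failed'
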
--- a/thirdparty/google_appengine/google/appengine/api/urlfetch_stub.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/api/urlfetch_stub.py	Fri Jun 19 16:13:32 2009 +0200
@@ -19,9 +19,11 @@
 
 
 
+import gzip
 import httplib
 import logging
 import socket
+import StringIO
 import urllib
 import urlparse
 
@@ -164,8 +166,11 @@
         protocol = last_protocol
 
       adjusted_headers = {
-        'Host': host,
-        'Accept': '*/*',
+          'User-Agent':
+          'AppEngine-Google; (+http://code.google.com/appengine)',
+          'Referer': 'http://localhost/',
+          'Host': host,
+          'Accept-Encoding': 'gzip',
       }
       if payload is not None:
         adjusted_headers['Content-Length'] = len(payload)
@@ -173,7 +178,12 @@
         adjusted_headers['Content-Type'] = 'application/x-www-form-urlencoded'
 
       for header in headers:
-        adjusted_headers[header.key().title()] = header.value()
+        if header.key().title().lower() == 'user-agent':
+          adjusted_headers['User-Agent'] = (
+              '%s %s' %
+              (header.value(), adjusted_headers['User-Agent']))
+        else:
+          adjusted_headers[header.key().title()] = header.value()
 
       logging.debug('Making HTTP request: host = %s, '
                     'url = %s, payload = %s, headers = %s',
@@ -219,8 +229,17 @@
               urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, error_msg)
       else:
         response.set_statuscode(http_response.status)
+        if http_response.getheader('content-encoding') == 'gzip':
+          gzip_stream = StringIO.StringIO(http_response_data)
+          gzip_file = gzip.GzipFile(fileobj=gzip_stream)
+          http_response_data = gzip_file.read()
         response.set_content(http_response_data[:MAX_RESPONSE_SIZE])
         for header_key, header_value in http_response.getheaders():
+          if (header_key.lower() == 'content-encoding' and
+              header_value == 'gzip'):
+            continue
+          if header_key.lower() == 'content-length':
+            header_value = len(response.content())
           header_proto = response.add_header()
           header_proto.set_key(header_key)
           header_proto.set_value(header_value)
@@ -245,6 +264,6 @@
     prohibited_headers = [h.key() for h in headers
                           if h.key().lower() in untrusted_headers]
     if prohibited_headers:
-      logging.warn("Stripped prohibited headers from URLFetch request: %s",
+      logging.warn('Stripped prohibited headers from URLFetch request: %s',
                    prohibited_headers)
     return (h for h in headers if h.key().lower() not in untrusted_headers)
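
The stub now sends Accept-Encoding: gzip, inflates compressed bodies before
the MAX_RESPONSE_SIZE truncation, rewrites Content-Length, and drops the
Content-Encoding header so callers never observe the compression. The
decompression step is the standard-library idiom, roughly (standalone sketch,
not SDK code):

    import gzip
    import StringIO

    def gzip_bytes(data):
      # Compress, as an origin server would.
      buf = StringIO.StringIO()
      f = gzip.GzipFile(fileobj=buf, mode='wb')
      f.write(data)
      f.close()
      return buf.getvalue()

    def gunzip_bytes(data):
      # Decompress, as the stub now does before truncating the body.
      return gzip.GzipFile(fileobj=StringIO.StringIO(data)).read()

    assert gunzip_bytes(gzip_bytes('hello')) == 'hello'
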
--- a/thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py	Fri Jun 19 16:13:32 2009 +0200
@@ -382,6 +382,8 @@
   require_perfect_plan_ = 0
   has_keys_only_ = 0
   keys_only_ = 0
+  has_transaction_ = 0
+  transaction_ = None
 
   def __init__(self, contents=None):
     self.filter_ = []
@@ -560,6 +562,24 @@
 
   def has_keys_only(self): return self.has_keys_only_
 
+  def transaction(self):
+    if self.transaction_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.transaction_ is None: self.transaction_ = Transaction()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.transaction_
+
+  def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()
+
+  def clear_transaction(self):
+    if self.has_transaction_:
+      self.has_transaction_ = 0;
+      if self.transaction_ is not None: self.transaction_.Clear()
+
+  def has_transaction(self): return self.has_transaction_
+
 
   def MergeFrom(self, x):
     assert x is not self
@@ -575,6 +595,7 @@
     for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
     if (x.has_require_perfect_plan()): self.set_require_perfect_plan(x.require_perfect_plan())
     if (x.has_keys_only()): self.set_keys_only(x.keys_only())
+    if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
 
   def Equals(self, x):
     if x is self: return 1
@@ -605,6 +626,8 @@
     if self.has_require_perfect_plan_ and self.require_perfect_plan_ != x.require_perfect_plan_: return 0
     if self.has_keys_only_ != x.has_keys_only_: return 0
     if self.has_keys_only_ and self.keys_only_ != x.keys_only_: return 0
+    if self.has_transaction_ != x.has_transaction_: return 0
+    if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -620,6 +643,7 @@
       if not p.IsInitialized(debug_strs): initialized=0
     for p in self.composite_index_:
       if not p.IsInitialized(debug_strs): initialized=0
+    if (self.has_transaction_ and not self.transaction_.IsInitialized(debug_strs)): initialized = 0
     return initialized
 
   def ByteSize(self):
@@ -639,6 +663,7 @@
     for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
     if (self.has_require_perfect_plan_): n += 3
     if (self.has_keys_only_): n += 3
+    if (self.has_transaction_): n += 2 + self.lengthString(self.transaction_.ByteSize())
     return n + 1
 
   def Clear(self):
@@ -654,6 +679,7 @@
     self.clear_composite_index()
     self.clear_require_perfect_plan()
     self.clear_keys_only()
+    self.clear_transaction()
 
   def OutputUnchecked(self, out):
     out.putVarInt32(10)
@@ -695,6 +721,10 @@
     if (self.has_keys_only_):
       out.putVarInt32(168)
       out.putBoolean(self.keys_only_)
+    if (self.has_transaction_):
+      out.putVarInt32(178)
+      out.putVarInt32(self.transaction_.ByteSize())
+      self.transaction_.OutputUnchecked(out)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -741,6 +771,12 @@
       if tt == 168:
         self.set_keys_only(d.getBoolean())
         continue
+      if tt == 178:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_transaction().TryMerge(tmp)
+        continue
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
       d.skipData(tt)
 
@@ -783,6 +819,10 @@
       cnt+=1
     if self.has_require_perfect_plan_: res+=prefix+("require_perfect_plan: %s\n" % self.DebugFormatBool(self.require_perfect_plan_))
     if self.has_keys_only_: res+=prefix+("keys_only: %s\n" % self.DebugFormatBool(self.keys_only_))
+    if self.has_transaction_:
+      res+=prefix+"transaction <\n"
+      res+=self.transaction_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
     return res
 
   kapp = 1
@@ -801,6 +841,7 @@
   kcomposite_index = 19
   krequire_perfect_plan = 20
   kkeys_only = 21
+  ktransaction = 22
 
   _TEXT = (
    "ErrorCode",
@@ -825,6 +866,7 @@
    "composite_index",
    "require_perfect_plan",
    "keys_only",
+   "transaction",
   )
 
   _TYPES = (
@@ -871,6 +913,8 @@
 
    ProtocolBuffer.Encoder.NUMERIC,
 
+   ProtocolBuffer.Encoder.STRING,
+
   )
 
   _STYLE = """"""
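
The magic numbers in the transaction support follow from the protocol-buffer
wire format: a tag is (field_number << 3) | wire_type, with wire type 2 for
length-delimited submessages and 0 for varint booleans. A quick check of the
constants used above (plain Python, not SDK code):

    # transaction is field 22, length-delimited -> tag 178
    assert (22 << 3) | 2 == 178
    # keys_only is field 21, a varint boolean -> tag 168
    assert (21 << 3) | 0 == 168
    # ByteSize() adds 2 for the transaction tag: varints carry 7 bits per
    # byte, and 178 > 127, so the tag alone takes two bytes on the wire.
    assert 178 > 0x7f
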
--- a/thirdparty/google_appengine/google/appengine/dist/__init__.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/dist/__init__.py	Fri Jun 19 16:13:32 2009 +0200
@@ -17,6 +17,20 @@
 
 """Specify the modules for which a stub exists."""
 
+__all__ = [
 
-__all__ = ['ftplib', 'httplib', 'py_imp', 'neo_cgi', 'select', 'socket',
-           'subprocess', 'tempfile']
+    'ftplib',
+    'httplib',
+    'neo_cgi',
+    'py_imp',
+    'select',
+    'socket',
+    'subprocess',
+    'tempfile',
+
+    'use_library',
+    ]
+
+from google.appengine.dist import _library
+
+use_library = _library.use_library
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/_library.py	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,281 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Support code that lives off of google.appengine.dist.
+
+Kept in a separate file from the __init__ module for testing purposes.
+"""
+
+
+__all__ = ['use_library']
+
+
+import distutils.version
+import os
+import sys
+
+server_software = os.getenv('SERVER_SOFTWARE')
+USING_SDK = not server_software or server_software.startswith('Dev')
+del server_software
+
+if not USING_SDK:
+  import google
+  this_version = os.path.dirname(os.path.dirname(google.__file__))
+  versions = os.path.dirname(this_version)
+  PYTHON_LIB = os.path.dirname(versions)
+  del google, this_version, versions
+else:
+  PYTHON_LIB = '/base/python_lib'
+
+installed = {}
+
+
+def SetAllowedModule(_):
+  pass
+
+
+class UnacceptableVersionError(Exception):
+  """Raised when a version of a package that is unacceptable is requested."""
+  pass
+
+
+def DjangoVersion():
+  """Discover the version of Django installed.
+
+  Returns:
+    A distutils.version.LooseVersion.
+  """
+  import django
+  return distutils.version.LooseVersion('.'.join(map(str, django.VERSION)))
+
+
+def PylonsVersion():
+  """Discover the version of Pylons installed.
+
+  Returns:
+    A distutils.version.LooseVersion.
+  """
+  import pylons
+  return distutils.version.LooseVersion(pylons.__version__)
+
+
+PACKAGES = {
+    'django': (DjangoVersion,
+               {'1.0': None, '0.96': None}),
+
+
+
+
+
+
+
+    '_test': (lambda: distutils.version.LooseVersion('1.0'), {'1.0': None}),
+    '_testpkg': (lambda: distutils.version.LooseVersion('1.0'),
+                 {'1.0': set([('_test', '1.0')])}),
+    }
+
+
+def EqualVersions(version, baseline):
+  """Test that a version is acceptable as compared to the baseline.
+
+  Meant to be used to compare version numbers as returned by a package itself
+  and not user input.
+
+  Args:
+    version: distutils.version.LooseVersion.
+        The version that is being checked.
+    baseline: distutils.version.LooseVersion.
+        The version that one hopes version compares equal to.
+
+  Returns:
+    A bool indicating whether the versions are considered equal.
+  """
+  baseline_tuple = baseline.version
+  truncated_tuple = version.version[:len(baseline_tuple)]
+  if truncated_tuple == baseline_tuple:
+    return True
+  else:
+    return False
+
+
+def AllowInstalledLibrary(name, desired):
+  """Allow the use of a package without performing a version check.
+
+  Needed to clear a package's dependencies in case the dependencies need to be
+  imported in order to perform a version check. The version check is skipped on
+  the dependencies because the assumption is that the package that triggered
+  the call would not be installed without the proper dependencies (which might
+  be a different version than what the package explicitly requires).
+
+  Args:
+    name: Name of package.
+    desired: Desired version.
+
+  Raises:
+    UnacceptableVersionError if the installed version of a package is
+    unacceptable.
+  """
+  if name == 'django' and desired != '0.96':
+    tail = os.path.join('lib', 'django')
+    sys.path[:] = [dirname
+                   for dirname in sys.path
+                   if not dirname.endswith(tail)]
+  CallSetAllowedModule(name, desired)
+  dependencies = PACKAGES[name][1][desired]
+  if dependencies:
+    for dep_name, dep_version in dependencies:
+      AllowInstalledLibrary(dep_name, dep_version)
+  installed[name] = desired, False
+
+
+def CheckInstalledLibrary(name, desired):
+  """Check that the library and its dependencies are installed.
+
+  Args:
+    name: Name of the library that should be installed.
+    desired: The desired version.
+
+  Raises:
+    UnacceptableVersionError if the installed version of a package is
+    unacceptable.
+  """
+  dependencies = PACKAGES[name][1][desired]
+  if dependencies:
+    for dep_name, dep_version in dependencies:
+      AllowInstalledLibrary(dep_name, dep_version)
+  CheckInstalledVersion(name, desired, explicit=True)
+
+
+def CheckInstalledVersion(name, desired, explicit):
+  """Check that the installed version of a package is acceptable.
+
+  Args:
+    name: Name of package.
+    desired: Desired version string.
+    explicit: Explicitly requested by the user or implicitly because of a
+      dependency.
+
+  Raises:
+    UnacceptableVersionError if the installed version of a package is
+    unacceptable.
+  """
+  CallSetAllowedModule(name, desired)
+  find_version = PACKAGES[name][0]
+  installed_version = find_version()
+  desired_version = distutils.version.LooseVersion(desired)
+  if not EqualVersions(installed_version, desired_version):
+    raise UnacceptableVersionError(
+        '%s %s was requested, but %s is already in use' %
+        (name, desired_version, installed_version))
+  installed[name] = desired, explicit
+
+
+def CallSetAllowedModule(name, desired):
+  """Helper to call SetAllowedModule(name), after special-casing Django."""
+  if name == 'django' and desired != '0.96':
+    tail = os.path.join('lib', 'django')
+    sys.path[:] = [dirname
+                   for dirname in sys.path
+                   if not dirname.endswith(tail)]
+  SetAllowedModule(name)
+
+
+def CreatePath(name, version):
+  """Create the path to a package."""
+  package_dir = '%s-%s' % (name, version)
+  return os.path.join(PYTHON_LIB, 'versions', 'third_party', package_dir)
+
+
+def RemoveLibrary(name):
+  """Remove a library that has been installed."""
+  installed_version, _ = installed[name]
+  path = CreatePath(name, installed_version)
+  try:
+    sys.path.remove(path)
+  except ValueError:
+    pass
+  del installed[name]
+
+
+def AddLibrary(name, version, explicit):
+  """Add a library to sys.path and 'installed'."""
+  sys.path.insert(1, CreatePath(name, version))
+  installed[name] = version, explicit
+
+
+def InstallLibrary(name, version, explicit=True):
+  """Install a package.
+
+  If the installation is explicit then the user made the installation request,
+  not a package as a dependency. Explicit installation leads to stricter
+  version checking.
+
+  Args:
+    name: Name of the requested package (already validated as available).
+    version: The desired version (already validated as available).
+    explicit: Explicitly requested by the user or implicitly because of a
+      dependency.
+  """
+  installed_version, explicitly_installed = installed.get(name, [None] * 2)
+  if name in sys.modules:
+    if explicit:
+      CheckInstalledVersion(name, version, explicit=True)
+    return
+  elif installed_version:
+    if version == installed_version:
+      return
+    if explicit:
+      if explicitly_installed:
+        raise ValueError('%s %s requested, but %s already in use' %
+                         (name, version, installed_version))
+      RemoveLibrary(name)
+    else:
+      version_ob = distutils.version.LooseVersion(version)
+      installed_ob = distutils.version.LooseVersion(installed_version)
+      if version_ob <= installed_ob:
+        return
+      else:
+        RemoveLibrary(name)
+  AddLibrary(name, version, explicit)
+  dep_details = PACKAGES[name][1][version]
+  if not dep_details:
+    return
+  for dep_name, dep_version in dep_details:
+    InstallLibrary(dep_name, dep_version, explicit=False)
+
+
+def use_library(name, version):
+  """Specify a third-party package to use.
+
+  Args:
+    name: Name of package to use.
+    version: Version of the package to use (string).
+  """
+  if name not in PACKAGES:
+    raise ValueError('%s is not a supported package' % name)
+  versions = PACKAGES[name][1].keys()
+  if version not in versions:
+    raise ValueError('%s is not a supported version for %s; '
+                     'supported versions are %s' % (version, name, versions))
+  if USING_SDK:
+    CheckInstalledLibrary(name, version)
+  else:
+    InstallLibrary(name, version, explicit=True)
+
+
+if not USING_SDK:
+  InstallLibrary('django', '0.96', explicit=False)
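
Note that version matching in this module is prefix-based rather than exact:
EqualVersions truncates the installed version tuple to the length of the
baseline, so use_library requesting '1.0' accepts any installed 1.0.x. A
standalone illustration of the comparison (mirrors EqualVersions above):

    import distutils.version

    def equal_versions(version, baseline):
      # Compare only the leading components, as EqualVersions does.
      baseline_tuple = baseline.version
      return version.version[:len(baseline_tuple)] == baseline_tuple

    v = distutils.version.LooseVersion
    assert equal_versions(v('1.0.2'), v('1.0'))       # 1.0.x satisfies 1.0
    assert not equal_versions(v('0.96.1'), v('1.0'))  # 0.96.x does not
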
--- a/thirdparty/google_appengine/google/appengine/dist/httplib.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/dist/httplib.py	Fri Jun 19 16:13:32 2009 +0200
@@ -188,7 +188,10 @@
         host = '%s:%s' % (self.host, self.port)
     else:
         host = self.host
-    url = '%s://%s%s' % (self.protocol, host, self._url)
+    if not self._url.startswith(self.protocol):
+      url = '%s://%s%s' % (self.protocol, host, self._url)
+    else:
+      url = self._url
     headers = dict(self.headers)
 
     try:
@@ -237,7 +240,7 @@
   def msg(self):
     msg = mimetools.Message(StringIO.StringIO(''))
     for name, value in self._fetch_response.headers.items():
-      msg[name] = value
+      msg[name] = str(value)
     return msg
 
   version = 11
--- a/thirdparty/google_appengine/google/appengine/ext/admin/__init__.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/__init__.py	Fri Jun 19 16:13:32 2009 +0200
@@ -49,11 +49,13 @@
 else:
   HAVE_CRON = True
 
+from google.appengine.api import apiproxy_stub_map
 from google.appengine.api import datastore
 from google.appengine.api import datastore_admin
 from google.appengine.api import datastore_types
 from google.appengine.api import datastore_errors
 from google.appengine.api import memcache
+from google.appengine.api.labs import taskqueue
 from google.appengine.api import users
 from google.appengine.ext import db
 from google.appengine.ext import webapp
@@ -116,6 +118,7 @@
       'interactive_path': base_path + InteractivePageHandler.PATH,
       'interactive_execute_path': base_path + InteractiveExecuteHandler.PATH,
       'memcache_path': base_path + MemcachePageHandler.PATH,
+      'queues_path': base_path + QueuesPageHandler.PATH,
     }
     if HAVE_CRON:
       values['cron_path'] = base_path + CronPageHandler.PATH
@@ -245,6 +248,80 @@
     self.generate('cron.html', values)
 
 
+class QueuesPageHandler(BaseRequestHandler):
+  """Shows information about configured (and default) task queues."""
+  PATH = '/queues'
+
+  def __init__(self):
+    self.stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
+
+  def get(self):
+    """Shows template displaying the configured task queues."""
+    values = {
+      'request': self.request,
+      'queues': self.stub.GetQueues(),
+    }
+    self.generate('queues.html', values)
+
+  def post(self):
+    """Handle modifying actions and/or redirect to GET page."""
+
+    if self.request.get('action:flushqueue'):
+      self.stub.FlushQueue(self.request.get('queue'))
+    self.redirect(self.request.path_url)
+
+
+class TasksPageHandler(BaseRequestHandler):
+  """Shows information about a queue's tasks."""
+
+  PATH = '/tasks'
+
+  PAGE_SIZE = 20
+
+  def __init__(self):
+    self.stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
+
+  def get(self):
+    """Shows template displaying the queue's tasks."""
+    queue = self.request.get('queue')
+    start = int(self.request.get('start', 0))
+    all_tasks = self.stub.GetTasks(queue)
+
+    next_start = start + self.PAGE_SIZE
+    tasks = all_tasks[start:next_start]
+    current_page = int(start / self.PAGE_SIZE) + 1
+    pages = []
+    for number in xrange(int(math.ceil(len(all_tasks) /
+                                       float(self.PAGE_SIZE)))):
+      pages.append({
+        'number': number + 1,
+        'start': number * self.PAGE_SIZE
+      })
+    if not all_tasks[next_start:]:
+      next_start = -1
+    prev_start = start - self.PAGE_SIZE
+    if prev_start < 0:
+      prev_start = -1
+
+    values = {
+      'request': self.request,
+      'queue_name': queue,
+      'tasks': tasks,
+      'start_base_url': self.filter_url(['queue']),
+      'prev_start': prev_start,
+      'next_start': next_start,
+      'pages': pages,
+      'current_page': current_page,
+    }
+    self.generate('tasks.html', values)
+
+  def post(self):
+    if self.request.get('action:deletetask'):
+      self.stub.DeleteTask(self.request.get('queue'), self.request.get('task'))
+    self.redirect(self.request.path_url + '?queue=' + self.request.get('queue'))
+    return
+
+
 class MemcachePageHandler(BaseRequestHandler):
   """Shows stats about memcache and query form to get values."""
   PATH = '/memcache'
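
TasksPageHandler pages through the stub's task list PAGE_SIZE entries at a
time, using -1 as the sentinel for "no previous/next page". The pager
arithmetic from get(), isolated as a sketch:

    import math

    PAGE_SIZE = 20

    def pager(start, total):
      # Mirrors the slice bounds and pager links computed in get().
      next_start = start + PAGE_SIZE
      if next_start >= total:
        next_start = -1                 # no next page
      prev_start = start - PAGE_SIZE
      if prev_start < 0:
        prev_start = -1                 # no previous page
      page_count = int(math.ceil(total / float(PAGE_SIZE)))
      return prev_start, next_start, page_count

    assert pager(0, 45) == (-1, 20, 3)    # first of three pages
    assert pager(40, 45) == (20, -1, 3)   # last page holds 5 tasks
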
@@ -1161,6 +1238,8 @@
     ('.*' + InteractiveExecuteHandler.PATH, InteractiveExecuteHandler),
     ('.*' + MemcachePageHandler.PATH, MemcachePageHandler),
     ('.*' + ImageHandler.PATH, ImageHandler),
+    ('.*' + QueuesPageHandler.PATH, QueuesPageHandler),
+    ('.*' + TasksPageHandler.PATH, TasksPageHandler),
     ('.*', DefaultPageHandler),
   ]
   if HAVE_CRON:
--- a/thirdparty/google_appengine/google/appengine/ext/admin/templates/base.html	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/templates/base.html	Fri Jun 19 16:13:32 2009 +0200
@@ -37,6 +37,7 @@
               <li><a href="{{ datastore_path }}">Datastore Viewer</a></li>
               <li><a href="{{ interactive_path }}">Interactive Console</a></li>
               <li><a href="{{ memcache_path }}">Memcache Viewer</a></li>
+              <li><a href="{{ queues_path }}">Task Queues</a></li>
               {% if cron_path %}
               <li><a href="{{ cron_path }}">Cron Jobs</a></li>
               {% endif %}
@@ -54,7 +55,7 @@
     
         <div id="ft">
           <p>
-            &copy;2008 Google
+            &copy;2009 Google
           </p>
         </div>
     {% block final %}{% endblock %}
--- a/thirdparty/google_appengine/google/appengine/ext/admin/templates/css/ae.css	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/templates/css/ae.css	Fri Jun 19 16:13:32 2009 +0200
@@ -42,6 +42,7 @@
 #ae-content {
   padding-left: 1em;
   border-left: 3px solid #e5ecf9;
+  min-height: 200px;
 }
 
 /* Tables */
@@ -148,3 +149,6 @@
   border: 2px solid #f00;
   background-color: #fee;
 }
+.ae-table .ae-pager {
+  background-color: #c5d7ef;
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/templates/css/queues.css	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,26 @@
+.ah-queues-message {
+  color: red;
+  margin-bottom: 1em;
+}
+
+#ah-queues .ah-queues-message {
+  margin: 1em;
+}
+
+.ah-queues-times {
+  margin-top: 1em;
+}
+#ah-queues .ae-table,
+#ah-queues .ae-table td {
+  border: 0;
+  padding: 0;
+}
+#ah-queues ol {
+  list-style: none;
+}
+#ah-queues li {
+  padding: .2em 0;
+}
+.ah-queues-test {
+  text-align: right;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/templates/css/tasks.css	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,26 @@
+.ah-tasks-message {
+  color: red;
+  margin-bottom: 1em;
+}
+
+#ah-tasks .ah-tasks-message {
+  margin: 1em;
+}
+
+.ah-task-times {
+  margin-top: 1em;
+}
+#ah-tasks .ae-table,
+#ah-tasks .ae-table td {
+  border: 0;
+  padding: 0;
+}
+#ah-tasks ol {
+  list-style: none;
+}
+#ah-tasks li {
+  padding: .2em 0;
+}
+.ah-task-test {
+  text-align: right;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/templates/js/webhook.js	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,74 @@
+// Copyright 2009 Google Inc.  All Rights Reserved.
+
+function Webhook(formId) {
+  this.formId = formId;
+  this.action = null;
+  this.headers = {};
+  this.method = null;
+  this.payload = null;
+};
+
+Webhook.prototype.HEADER_KEY = 'header:';
+
+Webhook.prototype.parse = function() {
+  var form = document.getElementById(this.formId);
+  if (form == null) {
+    return 'could not find form with id "' + this.formId + '"';
+  };
+  this.action = form.action;
+  this.method = form.method;
+  for (var i = 0, n = form.elements.length; i < n; i++) {
+    var currentElement = form.elements[i];
+    if (currentElement.tagName != 'INPUT' ||
+        currentElement.type.toUpperCase() != 'HIDDEN') {
+      continue;
+    }
+    var key = currentElement.name;
+    var value = currentElement.value;
+    var headerIndex = key.indexOf(this.HEADER_KEY);
+    if (headerIndex == 0) {
+      var header = key.substr(this.HEADER_KEY.length);
+      this.headers[header] = value;
+    } else if (key == 'payload') {
+      this.payload = value;
+    }
+  }
+  
+  if (this.action == '') {
+    return 'action not found';
+  }
+  if (this.method == '') {
+    return 'method not found';
+  }
+  return '';
+};
+
+Webhook.prototype.send = function(callback) {
+  var req = null;
+  if (window.XMLHttpRequest) {
+    req = new XMLHttpRequest();
+  } else if (window.ActiveXObject) {
+    req = new ActiveXObject('MSXML2.XMLHTTP.3.0');
+  }
+
+  try {
+    req.open(this.method, this.action, false);
+    for (var key in this.headers) {
+      req.setRequestHeader(key, this.headers[key]);
+    };
+    req.send(this.payload);
+  } catch (e) {
+    callback(this, req, e);
+    return;
+  }
+  callback(this, req, null);
+};
+
+Webhook.prototype.run = function(callback) {
+  var error = this.parse();
+  if (error != '') {
+    callback(this, null, error);
+  } else {
+    this.send(callback);
+  }
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/templates/queues.html	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,75 @@
+{% extends "base.html" %}
+
+{% block title %}
+{{ application_name }} Development Console - Task Queue Viewer{% endblock %}
+
+{% block head %}
+  <style type="text/css">{% include "css/queues.css" %}</style>
+{% endblock %}
+
+{% block breadcrumbs %}
+  <span class="item"><a href="">Queue Viewer</a></span>
+{% endblock %}
+
+{% block body %}
+<h3>Task Queues</h3>
+
+{% if queues %}
+  <p>
+  Tasks will not run automatically. Select a queue to run tasks manually.
+  </p>
+
+  <table id="ah-queues" class="ae-table ae-table-striped">
+    <thead>
+      <tr>
+        <th>Queue Name</th>
+        <th>Maximum Rate</th>
+        <th>Bucket Size</th>
+        <th>Oldest Task (UTC)</th>
+        <th>Tasks in Queue</th>
+        <th></th>
+      </tr>
+    </thead>
+    <tbody>
+      {% for queue in queues %}
+        <tr class="{% cycle ae-odd,ae-even %}">
+          <td valign="top">
+            <a href="/_ah/admin/tasks?queue={{ queue.name|escape }}">
+            {{ queue.name|escape }}</a>
+          </td>
+          <td valign="top">
+            {{ queue.max_rate|escape }}
+          </td>
+          <td valign="top">
+            {{ queue.bucket_size|escape }}
+          </td>
+          <td valign="top">
+            {% if queue.oldest_task %}
+              {{ queue.oldest_task|escape }}<br/>
+              ({{ queue.eta_delta|escape }})
+            {% else %}
+              None
+            {% endif %}
+          </td>
+          <td valign="top">
+            {{ queue.tasks_in_queue|escape }}
+          </td>
+          <td valign="top">
+            <form id="flushform" action="/_ah/admin/queues" method="post">
+            <input type="hidden" name="queue" value="{{ queue.name|escape }}"/>
+            <input type="submit" name="action:flushqueue" value="Flush Queue"
+             onclick="return confirm('Are you sure you want to flush all ' +
+                                     'tasks from {{ queue.name|escape }}?');"/>
+            </form>
+          </td>
+        </tr>
+      {% endfor %}
+    </tbody>
+  </table>
+{% else %}
+  This application doesn't define any task queues. See the documentation for more information.
+{% endif %}
+
+
+{% endblock %}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/templates/tasks.html	Fri Jun 19 16:13:32 2009 +0200
@@ -0,0 +1,103 @@
+{% extends "base.html" %}
+
+{% block title %}
+{{ application_name }} Development Console - Tasks Viewer{% endblock %}
+
+{% block head %}
+  <style type="text/css">{% include "css/pager.css" %}</style>
+  <style type="text/css">{% include "css/tasks.css" %}</style>
+  <script type="text/javascript">
+    {% include "js/webhook.js" %}
+
+    var handleTaskResult = function(hook, req, error) {
+      if (error != null) {
+        return;
+      };
+      if (req == null) {
+        return;
+      };
+      if (req.status != 200) {
+        return;
+      };
+      var parts = hook.formId.split('.');
+      var deleteForm = document.getElementById('deleteform.' + parts[1]);
+      if (deleteForm != null) {
+        deleteForm.submit();
+      };
+    };
+  </script>
+{% endblock %}
+
+{% block breadcrumbs %}
+  <span class="item"><a href="">Tasks Viewer</a></span>
+{% endblock %}
+
+{% block body %}
+<h3>Tasks for Queue: {{ queue_name|escape }}</h3>
+
+{% if tasks %}
+  <p>
+  Tasks will not run automatically. Push the 'Run' button to execute each task.
+  </p>
+
+  <table id="ah-tasks" class="ae-table ae-table-striped">
+    <thead>
+      <tr>
+        <th>Task Name</th>
+        <th>ETA (UTC)</th>
+        <th>Method</th>
+        <th>URL</th>
+        <th></th>
+        <th></th>
+      </tr>
+    </thead>
+    <tbody>
+      {% for task in tasks %}
+        <tr class="{% cycle ae-odd,ae-even %}">
+          <td valign="top">
+            {{ task.name|escape }}
+          </td>
+          <td valign="top">
+            {{ task.eta|escape }} ({{ task.eta_delta|escape }})
+          </td>
+          <td valign="top">
+            {{ task.method|escape }}
+          </td>
+          <td valign="top">
+            {{ task.url|escape }}
+          </td>
+          <td valign="top">
+            <form id="runform.{{ task.name|escape }}" action="{{ task.url|escape }}" method="{{ task.method|escape }}" onsubmit="(new Webhook('runform.{{ task.name|escape }}')).run(handleTaskResult); return false">
+            <input type="hidden" name="payload" value="{{ task.body|escape }}">
+            {% for header in task.headers.items %}
+              <input type="hidden" name="header:{{ header.0|escape }}"
+               value="{{ header.1|escape }}"/>
+            {% endfor %}
+            <input type="submit" value="Run"/>
+            </form>
+          </td>
+          <td valign="top">
+            <form id="deleteform.{{ task.name|escape }}" action="/_ah/admin/tasks" method="post">
+            <input type="hidden" name="queue" value="{{ queue_name|escape }}"/>
+            <input type="hidden" name="task" value="{{ task.name|escape }}"/>
+            <input type="hidden" name="action:deletetask" value="true"/>
+            <input type="submit" value="Delete"/>
+            </form>
+          </td>
+        </tr>
+      {% endfor %}
+      <tr>
+        <td colspan="6" class="ae-pager" align="right">
+          {% include "pager.html" %}
+        </td>
+      </tr>
+    </tbody>
+  </table>
+
+{% else %}
+  This queue doesn't contain any tasks.
+{% endif %}
+
+
+{% endblock %}
+
--- a/thirdparty/google_appengine/google/appengine/ext/db/__init__.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/ext/db/__init__.py	Fri Jun 19 16:13:32 2009 +0200
@@ -77,6 +77,7 @@
 
 
 
+
 import copy
 import datetime
 import logging
@@ -2610,8 +2611,13 @@
 
     if self.collection_name is None:
       self.collection_name = '%s_set' % (model_class.__name__.lower())
-    if hasattr(self.reference_class, self.collection_name):
-      raise DuplicatePropertyError('Class %s already has property %s'
+    existing_prop = getattr(self.reference_class, self.collection_name, None)
+    if existing_prop is not None:
+      if not (isinstance(existing_prop, _ReverseReferenceProperty) and
+              existing_prop._prop_name == property_name and
+              existing_prop._model.__name__ == model_class.__name__ and
+              existing_prop._model.__module__ == model_class.__module__):
+        raise DuplicatePropertyError('Class %s already has property %s '
                                    % (self.reference_class.__name__,
                                       self.collection_name))
     setattr(self.reference_class,
@@ -2759,13 +2765,23 @@
     Constructor does not take standard values of other property types.
 
     Args:
-      model: Model that this property is a collection of.
-      property: Foreign property on referred model that points back to this
-        properties entity.
+      model: Model class that this property is a collection of.
+      property: Name of foreign property on referred model that points back
+        to this property's entity.
     """
     self.__model = model
     self.__property = prop
 
+  @property
+  def _model(self):
+    """Internal helper to access the model class, read-only."""
+    return self.__model
+
+  @property
+  def _prop_name(self):
+    """Internal helper to access the property name, read-only."""
+    return self.__property
+
   def __get__(self, model_instance, model_class):
     """Fetches collection of model instances of this collection property."""
     if model_instance is not None:
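
With the relaxed check above, re-executing a model definition (for example
when a module is imported under two different names) no longer raises
DuplicatePropertyError, provided the existing attribute is the same
auto-created back-reference. A hedged sketch of the pattern that is now
tolerated (assumes the SDK's db package is importable):

    from google.appengine.ext import db

    class Author(db.Model):
      name = db.StringProperty()

    class Book(db.Model):
      # Auto-creates Author.book_set as a _ReverseReferenceProperty.
      author = db.ReferenceProperty(Author)

    # Executing the Book definition a second time used to raise
    # DuplicatePropertyError on Author.book_set; it now passes because the
    # existing back-reference matches the property name, model and module.
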
--- a/thirdparty/google_appengine/google/appengine/ext/db/djangoforms.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/ext/db/djangoforms.py	Fri Jun 19 16:13:32 2009 +0200
@@ -105,8 +105,9 @@
 def monkey_patch(name, bases, namespace):
   """A 'metaclass' for adding new methods to an existing class.
 
-  In this version, existing methods can't be overridden; this is by
-  design, to avoid accidents.
+  This shouldn't be used to override existing methods.  However,
+  because loading this module (like loading any module) should be
+  idempotent, we don't assert that.
 
   Usage example:
 
@@ -131,7 +132,6 @@
   base = bases[0]
   for name, value in namespace.iteritems():
     if name not in ('__metaclass__', '__module__'):
-      assert name not in base.__dict__, "Won't override attribute %r" % (name,)
       setattr(base, name, value)
   return base
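
monkey_patch is used through the __metaclass__ hook: the class body's
attributes are copied onto the first base class and the base itself is
returned, so re-importing the module simply re-sets the same attributes
instead of tripping the old assertion. A toy example outside the SDK:

    def monkey_patch(name, bases, namespace):
      # Same shape as djangoforms.monkey_patch above.
      base = bases[0]
      for attr, value in namespace.iteritems():
        if attr not in ('__metaclass__', '__module__'):
          setattr(base, attr, value)
      return base

    class Greeter(object):
      pass

    class GreeterPatch(Greeter):
      __metaclass__ = monkey_patch
      def greet(self):
        return 'hi'

    assert GreeterPatch is Greeter     # the base class is returned
    assert Greeter().greet() == 'hi'   # and now carries the new method
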
 
--- a/thirdparty/google_appengine/google/appengine/ext/gql/__init__.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/ext/gql/__init__.py	Fri Jun 19 16:13:32 2009 +0200
@@ -676,6 +676,10 @@
     """Return the result ordering list."""
     return self.__orderings
 
+  def is_keys_only(self):
+    """Returns True if this query returns Keys, False if it returns Entities."""
+    return self._keys_only
+
   __iter__ = Run
 
   __result_type_regex = re.compile(r'(\*|__key__)')
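
is_keys_only lets callers distinguish a SELECT __key__ query, which returns
bare keys, from a SELECT * query, which returns full entities. For example
(hedged sketch; assumes the SDK's gql package is importable):

    from google.appengine.ext import gql

    assert gql.GQL('SELECT __key__ FROM Story').is_keys_only()
    assert not gql.GQL('SELECT * FROM Story').is_keys_only()
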
--- a/thirdparty/google_appengine/google/appengine/ext/preload/__init__.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/ext/preload/__init__.py	Fri Jun 19 16:13:32 2009 +0200
@@ -189,7 +189,6 @@
 import zipfile
 import zlib
 
-import django
 import neo_cs
 import neo_util
 import webob
@@ -202,13 +201,11 @@
 from google.appengine.api import urlfetch
 from google.appengine.api import users
 
-from google.appengine.ext import admin
 from google.appengine.ext import bulkload
 from google.appengine.ext import db
 from google.appengine.ext import gql
 from google.appengine.ext import search
 from google.appengine.ext import webapp
-from google.appengine.ext.webapp import template
 
 from google.appengine.runtime import apiproxy
 
--- a/thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_stub.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_stub.py	Fri Jun 19 16:13:32 2009 +0200
@@ -428,7 +428,7 @@
   if not app_id:
     if not rtok:
       random.seed()
-      rtok = str(random.randint)
+      rtok = str(random.random())[2:]
     urlargs = {'rtok': rtok}
     response = server.Send(path, payload=None, **urlargs)
     if not response.startswith('{'):
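
This fixes a real bug: str(random.randint) is the string form of the bound
method object itself (something like '<built-in method randint ...>'), not a
random value. The replacement takes the decimal digits of a random float:

    import random

    random.seed()
    # str(random.random()) looks like '0.49723...'; [2:] keeps the digits
    # after '0.', yielding a simple random token.
    rtok = str(random.random())[2:]
    print rtok
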
--- a/thirdparty/google_appengine/google/appengine/tools/appcfg.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/tools/appcfg.py	Fri Jun 19 16:13:32 2009 +0200
@@ -48,6 +48,7 @@
 from google.appengine.cron import groctimespecification
 from google.appengine.api import appinfo
 from google.appengine.api import croninfo
+from google.appengine.api import queueinfo
 from google.appengine.api import validation
 from google.appengine.api import yaml_errors
 from google.appengine.api import yaml_object
@@ -57,21 +58,21 @@
 
 
 MAX_FILES_TO_CLONE = 100
-LIST_DELIMITER = "\n"
-TUPLE_DELIMITER = "|"
+LIST_DELIMITER = '\n'
+TUPLE_DELIMITER = '|'
 
-VERSION_FILE = "../VERSION"
+VERSION_FILE = '../VERSION'
 
 UPDATE_CHECK_TIMEOUT = 3
 
-NAG_FILE = ".appcfg_nag"
+NAG_FILE = '.appcfg_nag'
 
 MAX_LOG_LEVEL = 4
 
 verbosity = 1
 
 
-appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = "python"
+appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = 'python'
 _api_versions = os.environ.get('GOOGLE_TEST_API_VERSIONS', '1')
 _options = validation.Options(*_api_versions.split(','))
 appinfo.AppInfoExternal.ATTRIBUTES[appinfo.API_VERSION] = _options
@@ -106,9 +107,9 @@
   """
   for handler in config.handlers:
     handler_type = handler.GetHandlerType()
-    if handler_type in ("static_dir", "static_files"):
-      if handler_type == "static_dir":
-        regex = os.path.join(re.escape(handler.GetHandler()), ".*")
+    if handler_type in ('static_dir', 'static_files'):
+      if handler_type == 'static_dir':
+        regex = os.path.join(re.escape(handler.GetHandler()), '.*')
       else:
         regex = handler.upload
       if re.match(regex, filename):
@@ -117,8 +118,8 @@
         else:
           guess = mimetypes.guess_type(filename)[0]
           if guess is None:
-            default = "application/octet-stream"
-            print >>sys.stderr, ("Could not guess mimetype for %s.  Using %s."
+            default = 'application/octet-stream'
+            print >>sys.stderr, ('Could not guess mimetype for %s.  Using %s.'
                                  % (filename, default))
             return default
           return guess
@@ -153,8 +154,8 @@
   """
 
   ATTRIBUTES = {
-      "timestamp": validation.TYPE_FLOAT,
-      "opt_in": validation.Optional(validation.TYPE_BOOL),
+      'timestamp': validation.TYPE_FLOAT,
+      'opt_in': validation.Optional(validation.TYPE_BOOL),
   }
 
   @staticmethod
@@ -183,10 +184,10 @@
   version_filename = os.path.join(os.path.dirname(google.__file__),
                                   VERSION_FILE)
   if not isfile(version_filename):
-    logging.error("Could not find version file at %s", version_filename)
+    logging.error('Could not find version file at %s', version_filename)
     return None
 
-  version_fh = open_fn(version_filename, "r")
+  version_fh = open_fn(version_filename, 'r')
   try:
     version = yaml.safe_load(version_fh)
   finally:
@@ -194,28 +195,29 @@
 
   return version
 
-def RetryWithBackoff(initial_delay, backoff_factor, max_tries, callable):
-    """Calls a function multiple times, backing off more and more each time.
+
+def RetryWithBackoff(initial_delay, backoff_factor, max_tries, callable_func):
+  """Calls a function multiple times, backing off more and more each time.
 
-    Args:
-      initial_delay: Initial delay after first try, in seconds.
-      backoff_factor: Delay will be multiplied by this factor after each try.
-      max_tries: Maximum number of tries.
-      callable: The method to call, will pass no arguments.
+  Args:
+    initial_delay: Initial delay after first try, in seconds.
+    backoff_factor: Delay will be multiplied by this factor after each try.
+    max_tries: Maximum number of tries.
+    callable_func: The function to call; it is invoked with no arguments.
 
-    Returns:
-      True if the function succeded in one of its tries.
+  Returns:
+    True if the function succeeded in one of its tries.
 
-    Raises:
-      Whatever the function raises--an exception will immediately stop retries.
-    """
-    delay = initial_delay
-    while not callable() and max_tries > 0:
-      StatusUpdate("Will check again in %s seconds." % delay)
-      time.sleep(delay)
-      delay *= backoff_factor
-      max_tries -= 1
-    return max_tries > 0
+  Raises:
+    Whatever the function raises--an exception will immediately stop retries.
+  """
+  delay = initial_delay
+  while not callable_func() and max_tries > 0:
+    StatusUpdate('Will check again in %s seconds.' % delay)
+    time.sleep(delay)
+    delay *= backoff_factor
+    max_tries -= 1
+  return max_tries > 0
 
 
 class UpdateCheck(object):
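
The reindented RetryWithBackoff sleeps initial_delay seconds after the first
failed try and multiplies the delay by backoff_factor each round, returning
True only if callable_func succeeded before the tries ran out. The delay
schedule it produces when every try fails (standalone sketch):

    def delay_schedule(initial_delay, backoff_factor, max_tries):
      # The sleeps RetryWithBackoff performs if callable_func never succeeds.
      delays, delay = [], initial_delay
      for _ in range(max_tries):
        delays.append(delay)
        delay *= backoff_factor
      return delays

    assert delay_schedule(1, 2, 5) == [1, 2, 4, 8, 16]
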
@@ -260,13 +262,13 @@
   @staticmethod
   def MakeNagFilename():
     """Returns the filename for the nag file for this user."""
-    user_homedir = os.path.expanduser("~/")
+    user_homedir = os.path.expanduser('~/')
     if not os.path.isdir(user_homedir):
       drive, unused_tail = os.path.splitdrive(os.__file__)
       if drive:
-        os.environ["HOMEDRIVE"] = drive
+        os.environ['HOMEDRIVE'] = drive
 
-    return os.path.expanduser("~/" + NAG_FILE)
+    return os.path.expanduser('~/' + NAG_FILE)
 
   def _ParseVersionFile(self):
     """Parse the local VERSION file.
@@ -287,14 +289,14 @@
     """
     version = self._ParseVersionFile()
     if version is None:
-      logging.error("Could not determine if the SDK supports the api_version "
-                    "requested in app.yaml.")
+      logging.error('Could not determine if the SDK supports the api_version '
+                    'requested in app.yaml.')
       return
-    if self.config.api_version not in version["api_versions"]:
-      logging.critical("The api_version specified in app.yaml (%s) is not "
-                       "supported by this release of the SDK.  The supported "
-                       "api_versions are %s.",
-                       self.config.api_version, version["api_versions"])
+    if self.config.api_version not in version['api_versions']:
+      logging.critical('The api_version specified in app.yaml (%s) is not '
+                       'supported by this release of the SDK.  The supported '
+                       'api_versions are %s.',
+                       self.config.api_version, version['api_versions'])
       sys.exit(1)
 
   def CheckForUpdates(self):
@@ -303,9 +305,9 @@
     Queries the server for the latest SDK version at the same time reporting
     the local SDK version.  The server will respond with a yaml document
     containing the fields:
-      "release": The name of the release (e.g. 1.2).
-      "timestamp": The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
-      "api_versions": A list of api_version strings (e.g. ['1', 'beta']).
+      'release': The name of the release (e.g. 1.2).
+      'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
+      'api_versions': A list of api_version strings (e.g. ['1', 'beta']).
 
     We will nag the user with increasing severity if:
     - There is a new release.
@@ -315,42 +317,42 @@
     """
     version = self._ParseVersionFile()
     if version is None:
-      logging.info("Skipping update check")
+      logging.info('Skipping update check')
       return
-    logging.info("Checking for updates to the SDK.")
+    logging.info('Checking for updates to the SDK.')
 
     try:
-      response = self.server.Send("/api/updatecheck",
+      response = self.server.Send('/api/updatecheck',
                                   timeout=UPDATE_CHECK_TIMEOUT,
-                                  release=version["release"],
-                                  timestamp=version["timestamp"],
-                                  api_versions=version["api_versions"])
+                                  release=version['release'],
+                                  timestamp=version['timestamp'],
+                                  api_versions=version['api_versions'])
     except urllib2.URLError, e:
-      logging.info("Update check failed: %s", e)
+      logging.info('Update check failed: %s', e)
       return
 
     latest = yaml.safe_load(response)
-    if latest["release"] == version["release"]:
-      logging.info("The SDK is up to date.")
+    if latest['release'] == version['release']:
+      logging.info('The SDK is up to date.')
       return
 
-    api_versions = latest["api_versions"]
+    api_versions = latest['api_versions']
     if self.config.api_version not in api_versions:
       self._Nag(
-          "The api version you are using (%s) is obsolete!  You should\n"
-          "upgrade your SDK and test that your code works with the new\n"
-          "api version." % self.config.api_version,
+          'The api version you are using (%s) is obsolete!  You should\n'
+          'upgrade your SDK and test that your code works with the new\n'
+          'api version.' % self.config.api_version,
           latest, version, force=True)
       return
 
     if self.config.api_version != api_versions[len(api_versions) - 1]:
       self._Nag(
-          "The api version you are using (%s) is deprecated. You should\n"
-          "upgrade your SDK to try the new functionality." %
+          'The api version you are using (%s) is deprecated. You should\n'
+          'upgrade your SDK to try the new functionality.' %
           self.config.api_version, latest, version)
       return
 
-    self._Nag("There is a new release of the SDK available.",
+    self._Nag('There is a new release of the SDK available.',
               latest, version)
 
   def _ParseNagFile(self):
@@ -361,7 +363,7 @@
     """
     nag_filename = UpdateCheck.MakeNagFilename()
     if self.isfile(nag_filename):
-      fh = self.open(nag_filename, "r")
+      fh = self.open(nag_filename, 'r')
       try:
         nag = NagFile.Load(fh)
       finally:
@@ -380,13 +382,13 @@
     """
     nagfilename = UpdateCheck.MakeNagFilename()
     try:
-      fh = self.open(nagfilename, "w")
+      fh = self.open(nagfilename, 'w')
       try:
         fh.write(nag.ToYAML())
       finally:
         fh.close()
     except (OSError, IOError), e:
-      logging.error("Could not write nag file to %s. Error: %s", nagfilename, e)
+      logging.error('Could not write nag file to %s. Error: %s', nagfilename, e)
 
   def _Nag(self, msg, latest, version, force=False):
     """Prints a nag message and updates the nag file's timestamp.
@@ -406,7 +408,7 @@
     if nag and not force:
       last_nag = datetime.datetime.fromtimestamp(nag.timestamp)
       if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1):
-        logging.debug("Skipping nag message")
+        logging.debug('Skipping nag message')
         return
 
     if nag is None:
@@ -414,17 +416,17 @@
     nag.timestamp = time.time()
     self._WriteNagFile(nag)
 
-    print "****************************************************************"
+    print '****************************************************************'
     print msg
-    print "-----------"
-    print "Latest SDK:"
+    print '-----------'
+    print 'Latest SDK:'
     print yaml.dump(latest)
-    print "-----------"
-    print "Your SDK:"
+    print '-----------'
+    print 'Your SDK:'
     print yaml.dump(version)
-    print "-----------"
-    print "Please visit http://code.google.com/appengine for the latest SDK"
-    print "****************************************************************"
+    print '-----------'
+    print 'Please visit http://code.google.com/appengine for the latest SDK'
+    print '****************************************************************'
 
   def AllowedToCheckForUpdates(self, input_fn=raw_input):
     """Determines if the user wants to check for updates.
@@ -450,16 +452,16 @@
       nag.timestamp = time.time()
 
     if nag.opt_in is None:
-      answer = input_fn("Allow dev_appserver to check for updates on startup? "
-                        "(Y/n): ")
+      answer = input_fn('Allow dev_appserver to check for updates on startup? '
+                        '(Y/n): ')
       answer = answer.strip().lower()
-      if answer == "n" or answer == "no":
-        print ("dev_appserver will not check for updates on startup.  To "
-               "change this setting, edit %s" % UpdateCheck.MakeNagFilename())
+      if answer == 'n' or answer == 'no':
+        print ('dev_appserver will not check for updates on startup.  To '
+               'change this setting, edit %s' % UpdateCheck.MakeNagFilename())
         nag.opt_in = False
       else:
-        print ("dev_appserver will check for updates on startup.  To change "
-               "this setting, edit %s" % UpdateCheck.MakeNagFilename())
+        print ('dev_appserver will check for updates on startup.  To change '
+               'this setting, edit %s' % UpdateCheck.MakeNagFilename())
         nag.opt_in = True
       self._WriteNagFile(nag)
     return nag.opt_in
@@ -483,8 +485,8 @@
 
   def DoUpload(self):
     """Uploads the index definitions."""
-    StatusUpdate("Uploading index definitions.")
-    self.server.Send("/api/datastore/index/add",
+    StatusUpdate('Uploading index definitions.')
+    self.server.Send('/api/datastore/index/add',
                      app_id=self.config.application,
                      version=self.config.version,
                      payload=self.definitions.ToYAML())
@@ -508,13 +510,38 @@
 
   def DoUpload(self):
     """Uploads the cron entries."""
-    StatusUpdate("Uploading cron entries.")
-    self.server.Send("/api/datastore/cron/update",
+    StatusUpdate('Uploading cron entries.')
+    self.server.Send('/api/datastore/cron/update',
                      app_id=self.config.application,
                      version=self.config.version,
                      payload=self.cron.ToYAML())
 
 
+class QueueEntryUpload(object):
+  """Provides facilities to upload task queue entries to the hosting service."""
+
+  def __init__(self, server, config, queue):
+    """Creates a new QueueEntryUpload.
+
+    Args:
+      server: The RPC server to use.  Should be an instance of a subclass of
+        AbstractRpcServer.
+      config: The AppInfoExternal object derived from the app.yaml file.
+      queue: The QueueInfoExternal object loaded from the queue.yaml file.
+    """
+    self.server = server
+    self.config = config
+    self.queue = queue
+
+  def DoUpload(self):
+    """Uploads the task queue entries."""
+    StatusUpdate('Uploading task queue entries.')
+    self.server.Send('/api/queue/update',
+                     app_id=self.config.application,
+                     version=self.config.version,
+                     payload=self.queue.ToYAML())
+
+
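
For a sense of the calling convention, here is a minimal usage sketch. The stub objects stand in for the real RPC server and the parsed app.yaml/queue.yaml; it assumes QueueEntryUpload and StatusUpdate from this module are importable, and the queue YAML shown is purely illustrative:

    class FakeRpcServer(object):
      """Stand-in for AbstractRpcServer: prints the POST instead of sending."""
      def Send(self, url, **kwds):
        print 'POST %s (app_id=%s, version=%s)' % (
            url, kwds['app_id'], kwds['version'])
        return ''

    class FakeConfig(object):
      application = 'example-app'  # stand-in for app.yaml's application field
      version = '1'                # stand-in for app.yaml's version field

    class FakeQueueInfo(object):
      def ToYAML(self):
        return 'queue:\n- name: default\n  rate: 5/s\n'

    upload = QueueEntryUpload(FakeRpcServer(), FakeConfig(), FakeQueueInfo())
    upload.DoUpload()  # StatusUpdate message, then POST /api/queue/update
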
 class IndexOperation(object):
   """Provide facilities for writing Index operation commands."""
 
@@ -543,8 +570,8 @@
       present on the server but missing from the index.yaml file (indicating
       that these indexes should probably be vacuumed).
     """
-    StatusUpdate("Fetching index definitions diff.")
-    response = self.server.Send("/api/datastore/index/diff",
+    StatusUpdate('Fetching index definitions diff.')
+    response = self.server.Send('/api/datastore/index/diff',
                                 app_id=self.config.application,
                                 payload=definitions.ToYAML())
     return datastore_index.ParseMultipleIndexDefinitions(response)
@@ -561,8 +588,8 @@
       be normal behavior as there is a potential race condition between fetching
       the index-diff and sending deletion confirmation through.
     """
-    StatusUpdate("Deleting selected index definitions.")
-    response = self.server.Send("/api/datastore/index/delete",
+    StatusUpdate('Deleting selected index definitions.')
+    response = self.server.Send('/api/datastore/index/delete',
                                 app_id=self.config.application,
                                 payload=definitions.ToYAML())
     return datastore_index.ParseIndexDefinitions(response)
@@ -608,24 +635,24 @@
       True if user enters 'y' or 'a'.  False if user enter 'n'.
     """
     while True:
-      print "This index is no longer defined in your index.yaml file."
+      print 'This index is no longer defined in your index.yaml file.'
       print
       print index.ToYAML()
       print
 
       confirmation = self.confirmation_fn(
-          "Are you sure you want to delete this index? (N/y/a): ")
+          'Are you sure you want to delete this index? (N/y/a): ')
       confirmation = confirmation.strip().lower()
 
-      if confirmation == "y":
+      if confirmation == 'y':
         return True
-      elif confirmation == "n" or not confirmation:
+      elif confirmation == 'n' or not confirmation:
         return False
-      elif confirmation == "a":
+      elif confirmation == 'a':
         self.force = True
         return True
       else:
-        print "Did not understand your response."
+        print 'Did not understand your response.'
 
   def DoVacuum(self, definitions):
     """Vacuum indexes in datastore.
@@ -659,11 +686,11 @@
       if not_deleted.indexes:
         not_deleted_count = len(not_deleted.indexes)
         if not_deleted_count == 1:
-          warning_message = ("An index was not deleted.  Most likely this is "
-                             "because it no longer exists.\n\n")
+          warning_message = ('An index was not deleted.  Most likely this is '
+                             'because it no longer exists.\n\n')
         else:
-          warning_message = ("%d indexes were not deleted.  Most likely this "
-                             "is because they no longer exist.\n\n"
+          warning_message = ('%d indexes were not deleted.  Most likely this '
+                             'is because they no longer exist.\n\n'
                              % not_deleted_count)
         for index in not_deleted.indexes:
           warning_message += index.ToYAML()
@@ -674,7 +701,7 @@
   """Provide facilities to export request logs."""
 
   def __init__(self, server, config, output_file,
-               num_days, append, severity, now, vhost):
+               num_days, append, severity, now, vhost, include_vhost):
     """Constructor.
 
     Args:
@@ -687,6 +714,7 @@
       severity: App log severity to request (0-4); None for no app logs.
       now: POSIX timestamp used for calculating valid dates for num_days.
       vhost: The virtual host of log messages to get. None for all hosts.
+      include_vhost: If true, the virtual host is included in log messages.
     """
     self.server = server
     self.config = config
@@ -695,21 +723,22 @@
     self.num_days = num_days
     self.severity = severity
     self.vhost = vhost
-    self.version_id = self.config.version + ".1"
+    self.include_vhost = include_vhost
+    self.version_id = self.config.version + '.1'
     self.sentinel = None
-    self.write_mode = "w"
+    self.write_mode = 'w'
     if self.append:
       self.sentinel = FindSentinel(self.output_file)
-      self.write_mode = "a"
+      self.write_mode = 'a'
     self.valid_dates = None
     if self.num_days:
       patterns = []
       now = PacificTime(now)
       for i in xrange(self.num_days):
         then = time.gmtime(now - 24*3600 * i)
-        patterns.append(re.escape(time.strftime("%d/%m/%Y", then)))
-        patterns.append(re.escape(time.strftime("%d/%b/%Y", then)))
-      self.valid_dates = re.compile(r"[^[]+\[(" + "|".join(patterns) + r"):")
+        patterns.append(re.escape(time.strftime('%d/%m/%Y', then)))
+        patterns.append(re.escape(time.strftime('%d/%b/%Y', then)))
+      self.valid_dates = re.compile(r'[^[]+\[(' + '|'.join(patterns) + r'):')
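
The date filter built here can be exercised on its own. A standalone sketch (it skips the PacificTime adjustment applied above, and the log line is fabricated):

    import re
    import time

    now = time.time()
    patterns = []
    for i in xrange(2):  # two days of history, as with --num_days=2
      then = time.gmtime(now - 24*3600 * i)
      patterns.append(re.escape(time.strftime('%d/%m/%Y', then)))
      patterns.append(re.escape(time.strftime('%d/%b/%Y', then)))
    valid_dates = re.compile(r'[^[]+\[(' + '|'.join(patterns) + r'):')

    line = '1.2.3.4 - - [%s:12:34:56 -0700] "GET / HTTP/1.1" 200' % (
        time.strftime('%d/%b/%Y', time.gmtime(now)))
    print bool(valid_dates.match(line))  # True: within the date window
    print bool(valid_dates.match('1.2.3.4 - - [01/Jan/1970:00:00:00]'))  # False
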
 
   def DownloadLogs(self):
     """Download the requested logs.
@@ -718,7 +747,7 @@
     self.output_file, or to stdout if the filename is '-'.
     Multiple roundtrips to the server may be made.
     """
-    StatusUpdate("Downloading request logs for %s %s." %
+    StatusUpdate('Downloading request logs for %s %s.' %
                  (self.config.application, self.version_id))
     tf = tempfile.TemporaryFile()
     offset = None
@@ -729,16 +758,16 @@
           if not offset:
             break
         except KeyboardInterrupt:
-          StatusUpdate("Keyboard interrupt; saving data downloaded so far.")
+          StatusUpdate('Keyboard interrupt; saving data downloaded so far.')
           break
-      StatusUpdate("Copying request logs to %r." % self.output_file)
-      if self.output_file == "-":
+      StatusUpdate('Copying request logs to %r.' % self.output_file)
+      if self.output_file == '-':
         of = sys.stdout
       else:
         try:
           of = open(self.output_file, self.write_mode)
         except IOError, err:
-          StatusUpdate("Can't write %r: %s." % (self.output_file, err))
+          StatusUpdate('Can\'t write %r: %s.' % (self.output_file, err))
           sys.exit(1)
       try:
         line_count = CopyReversedLines(tf, of)
@@ -748,7 +777,7 @@
           of.close()
     finally:
       tf.close()
-    StatusUpdate("Copied %d records." % line_count)
+    StatusUpdate('Copied %d records.' % line_count)
 
   def RequestLogLines(self, tf, offset):
     """Make a single roundtrip to the server.
@@ -763,28 +792,30 @@
       The offset string to be used for the next request, if another
       request should be issued; or None, if not.
     """
-    logging.info("Request with offset %r.", offset)
-    kwds = {"app_id": self.config.application,
-            "version": self.version_id,
-            "limit": 100,
+    logging.info('Request with offset %r.', offset)
+    kwds = {'app_id': self.config.application,
+            'version': self.version_id,
+            'limit': 100,
            }
     if offset:
-      kwds["offset"] = offset
+      kwds['offset'] = offset
     if self.severity is not None:
-      kwds["severity"] = str(self.severity)
+      kwds['severity'] = str(self.severity)
     if self.vhost is not None:
-      kwds["vhost"] = str(self.vhost)
-    response = self.server.Send("/api/request_logs", payload=None, **kwds)
-    response = response.replace("\r", "\0")
+      kwds['vhost'] = str(self.vhost)
+    if self.include_vhost is not None:
+      kwds['include_vhost'] = str(self.include_vhost)
+    response = self.server.Send('/api/request_logs', payload=None, **kwds)
+    response = response.replace('\r', '\0')
     lines = response.splitlines()
-    logging.info("Received %d bytes, %d records.", len(response), len(lines))
+    logging.info('Received %d bytes, %d records.', len(response), len(lines))
     offset = None
-    if lines and lines[0].startswith("#"):
-      match = re.match(r"^#\s*next_offset=(\S+)\s*$", lines[0])
+    if lines and lines[0].startswith('#'):
+      match = re.match(r'^#\s*next_offset=(\S+)\s*$', lines[0])
       del lines[0]
       if match:
         offset = match.group(1)
-    if lines and lines[-1].startswith("#"):
+    if lines and lines[-1].startswith('#'):
       del lines[-1]
     valid_dates = self.valid_dates
     sentinel = self.sentinel
@@ -794,10 +825,10 @@
     for line in lines:
       if ((sentinel and
            line.startswith(sentinel) and
-           line[len_sentinel : len_sentinel+1] in ("", "\0")) or
+           line[len_sentinel : len_sentinel+1] in ('', '\0')) or
           (valid_dates and not valid_dates.match(line))):
         return None
-      tf.write(line + "\n")
+      tf.write(line + '\n')
     if not lines:
       return None
     return offset
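
Fed a fabricated server response, the offset handshake above behaves as follows; this is a standalone re-run of the parsing lines, not new logic:

    import re

    response = ('#next_offset=abc123\n'
                'line one\n'
                'line two\n'
                '# end of batch\n')
    lines = response.splitlines()

    offset = None
    if lines and lines[0].startswith('#'):
      match = re.match(r'^#\s*next_offset=(\S+)\s*$', lines[0])
      del lines[0]
      if match:
        offset = match.group(1)
    if lines and lines[-1].startswith('#'):
      del lines[-1]

    print offset  # 'abc123', passed back as the next request's offset
    print lines   # ['line one', 'line two']
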
@@ -861,9 +892,9 @@
   r"""Copy lines from input stream to output stream in reverse order.
 
   As a special feature, null bytes in the input are turned into
-  newlines followed by tabs in the output, but these "sub-lines"
+  newlines followed by tabs in the output, but these 'sub-lines'
   separated by null bytes are not reversed.  E.g. If the input is
-  "A\0B\nC\0D\n", the output is "C\n\tD\nA\n\tB\n".
+  'A\0B\nC\0D\n', the output is 'C\n\tD\nA\n\tB\n'.
 
   Args:
     instream: A seekable stream open for reading in binary mode.
@@ -876,20 +907,20 @@
   line_count = 0
   instream.seek(0, 2)
   last_block = instream.tell() // blocksize
-  spillover = ""
+  spillover = ''
   for iblock in xrange(last_block + 1, -1, -1):
     instream.seek(iblock * blocksize)
     data = instream.read(blocksize)
     lines = data.splitlines(True)
-    lines[-1:] = "".join(lines[-1:] + [spillover]).splitlines(True)
-    if lines and not lines[-1].endswith("\n"):
-      lines[-1] += "\n"
+    lines[-1:] = ''.join(lines[-1:] + [spillover]).splitlines(True)
+    if lines and not lines[-1].endswith('\n'):
+      lines[-1] += '\n'
     lines.reverse()
     if lines and iblock > 0:
       spillover = lines.pop()
     if lines:
       line_count += len(lines)
-      data = "".join(lines).replace("\0", "\n\t")
+      data = ''.join(lines).replace('\0', '\n\t')
       outstream.write(data)
   return line_count
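
The docstring example can be checked directly; a small sketch assuming CopyReversedLines is importable from this file:

    from StringIO import StringIO

    instream = StringIO('A\0B\nC\0D\n')
    outstream = StringIO()
    count = CopyReversedLines(instream, outstream)
    print count                       # 2 logical lines copied
    print repr(outstream.getvalue())  # 'C\n\tD\nA\n\tB\n'
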
 
@@ -907,13 +938,13 @@
     couldn't be opened or no such line could be found by inspecting
     the last 'blocksize' bytes of the file.
   """
-  if filename == "-":
-    StatusUpdate("Can't combine --append with output to stdout.")
+  if filename == '-':
+    StatusUpdate('Can\'t combine --append with output to stdout.')
     sys.exit(2)
   try:
-    fp = open(filename, "rb")
+    fp = open(filename, 'rb')
   except IOError, err:
-    StatusUpdate("Append mode disabled: can't read %r: %s." % (filename, err))
+    StatusUpdate('Append mode disabled: can\'t read %r: %s.' % (filename, err))
     return None
   try:
     fp.seek(0, 2)
@@ -922,13 +953,13 @@
     del lines[:1]
     sentinel = None
     for line in lines:
-      if not line.startswith("\t"):
+      if not line.startswith('\t'):
         sentinel = line
     if not sentinel:
-      StatusUpdate("Append mode disabled: can't find sentinel in %r." %
+      StatusUpdate('Append mode disabled: can\'t find sentinel in %r.' %
                    filename)
       return None
-    return sentinel.rstrip("\n")
+    return sentinel.rstrip('\n')
   finally:
     fp.close()
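
The sentinel selection itself is simple enough to run in isolation: app-level log lines are tab-indented, so the sentinel is the last request line in the tail of the file. A standalone sketch of just that loop (the fabricated lines mimic a downloaded log):

    lines = 'req one\n\tapp log\nreq two\n\tapp log\n'.splitlines(True)
    del lines[:1]  # the first line read may be partial, so it is discarded
    sentinel = None
    for line in lines:
      if not line.startswith('\t'):
        sentinel = line
    print repr(sentinel.rstrip('\n'))  # 'req two'
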
 
@@ -975,7 +1006,7 @@
       The string representation of the hash.
     """
     h = sha.new(content).hexdigest()
-    return "%s_%s_%s_%s_%s" % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
+    return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
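
The identifier is just a SHA-1 hex digest split into five 8-character groups; for instance (sha is Python 2's module, as imported by this file):

    import sha

    h = sha.new('example file contents').hexdigest()
    print '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
    # 40 hex characters printed as five underscore-separated groups
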
 
   def AddFile(self, path, file_handle):
     """Adds the provided file to the list to be pushed to the server.
@@ -984,7 +1015,7 @@
       path: The path the file should be uploaded as.
       file_handle: A stream containing data to upload.
     """
-    assert not self.in_transaction, "Already in a transaction."
+    assert not self.in_transaction, 'Already in a transaction.'
     assert file_handle is not None
 
     reason = appinfo.ValidFilename(path)
@@ -1007,10 +1038,10 @@
       A list of pathnames for files that should be uploaded using UploadFile()
       before Commit() can be called.
     """
-    assert not self.in_transaction, "Already in a transaction."
+    assert not self.in_transaction, 'Already in a transaction.'
 
-    StatusUpdate("Initiating update.")
-    self.server.Send("/api/appversion/create", app_id=self.app_id,
+    StatusUpdate('Initiating update.')
+    self.server.Send('/api/appversion/create', app_id=self.app_id,
                      version=self.version, payload=self.config.ToYAML())
     self.in_transaction = True
 
@@ -1036,11 +1067,11 @@
       if not files:
         return
 
-      StatusUpdate("Cloning %d %s file%s." %
-                   (len(files), file_type, len(files) != 1 and "s" or ""))
+      StatusUpdate('Cloning %d %s file%s.' %
+                   (len(files), file_type, len(files) != 1 and 's' or ''))
       for i in xrange(0, len(files), MAX_FILES_TO_CLONE):
         if i > 0 and i % MAX_FILES_TO_CLONE == 0:
-          StatusUpdate("Cloned %d files." % i)
+          StatusUpdate('Cloned %d files.' % i)
 
         chunk = files[i:min(len(files), i + MAX_FILES_TO_CLONE)]
         result = self.server.Send(url,
@@ -1050,10 +1081,10 @@
           files_to_upload.update(dict(
               (f, self.files[f]) for f in result.split(LIST_DELIMITER)))
 
-    CloneFiles("/api/appversion/cloneblobs", blobs_to_clone, "static")
-    CloneFiles("/api/appversion/clonefiles", files_to_clone, "application")
+    CloneFiles('/api/appversion/cloneblobs', blobs_to_clone, 'static')
+    CloneFiles('/api/appversion/clonefiles', files_to_clone, 'application')
 
-    logging.info("Files to upload: " + str(files_to_upload))
+    logging.info('Files to upload: ' + str(files_to_upload))
 
     self.files = files_to_upload
     return sorted(files_to_upload.iterkeys())
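
The chunking arithmetic above batches clone requests; a standalone sketch (the MAX_FILES_TO_CLONE value here is assumed for illustration):

    MAX_FILES_TO_CLONE = 100  # assumed; the real constant is defined in this file

    files = ['file%04d' % i for i in xrange(250)]
    for i in xrange(0, len(files), MAX_FILES_TO_CLONE):
      chunk = files[i:min(len(files), i + MAX_FILES_TO_CLONE)]
      print 'batch %d: %d files' % (i // MAX_FILES_TO_CLONE, len(chunk))
    # batch 0: 100 files, batch 1: 100 files, batch 2: 50 files
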
@@ -1071,19 +1102,19 @@
     Raises:
       KeyError: The provided file is not amongst those to be uploaded.
     """
-    assert self.in_transaction, "Begin() must be called before UploadFile()."
+    assert self.in_transaction, 'Begin() must be called before UploadFile().'
     if path not in self.files:
-      raise KeyError("File '%s' is not in the list of files to be uploaded."
+      raise KeyError('File \'%s\' is not in the list of files to be uploaded.'
                      % path)
 
     del self.files[path]
     mime_type = GetMimeTypeIfStaticFile(self.config, path)
     if mime_type is not None:
-      self.server.Send("/api/appversion/addblob", app_id=self.app_id,
+      self.server.Send('/api/appversion/addblob', app_id=self.app_id,
                        version=self.version, path=path, content_type=mime_type,
                        payload=file_handle.read())
     else:
-      self.server.Send("/api/appversion/addfile", app_id=self.app_id,
+      self.server.Send('/api/appversion/addfile', app_id=self.app_id,
                        version=self.version, path=path,
                        payload=file_handle.read())
 
@@ -1098,21 +1129,21 @@
     Raises:
       Exception: Some required files were not uploaded.
     """
-    assert self.in_transaction, "Begin() must be called before Commit()."
+    assert self.in_transaction, 'Begin() must be called before Commit().'
     if self.files:
-      raise Exception("Not all required files have been uploaded.")
+      raise Exception('Not all required files have been uploaded.')
 
     try:
       self.Deploy()
       if not RetryWithBackoff(1, 2, 8, self.IsReady):
-        logging.warning("Version still not ready to serve, aborting.")
-        raise Exception("Version not ready.")
+        logging.warning('Version still not ready to serve, aborting.')
+        raise Exception('Version not ready.')
       self.StartServing()
     except urllib2.HTTPError, e:
       if e.code != 404:
         raise
-      StatusUpdate("Closing update.")
-      self.server.Send("/api/appversion/commit", app_id=self.app_id,
+      StatusUpdate('Closing update.')
+      self.server.Send('/api/appversion/commit', app_id=self.app_id,
                        version=self.version)
       self.in_transaction = False
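
RetryWithBackoff itself is not shown in this hunk; a plausible sketch consistent with the call site RetryWithBackoff(1, 2, 8, self.IsReady) -- initial delay, backoff factor, maximum tries, and a callable polled until it returns true -- might look like this (an illustration, not the module's actual implementation):

    import time

    def RetryWithBackoff(initial_delay, backoff_factor, max_tries, callable_fn):
      # Poll callable_fn, sleeping initial_delay, then delay * backoff_factor,
      # and so on; give up after max_tries failed attempts.
      delay = initial_delay
      while not callable_fn():
        max_tries -= 1
        if max_tries <= 0:
          return False
        time.sleep(delay)
        delay *= backoff_factor
      return True

    attempts = [False, False, True]
    print RetryWithBackoff(0.01, 2, 8, lambda: attempts.pop(0))  # True
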
 
@@ -1125,12 +1156,12 @@
     Raises:
       Exception: Some required files were not uploaded.
     """
-    assert self.in_transaction, "Begin() must be called before Deploy()."
+    assert self.in_transaction, 'Begin() must be called before Deploy().'
     if self.files:
-      raise Exception("Not all required files have been uploaded.")
+      raise Exception('Not all required files have been uploaded.')
 
-    StatusUpdate("Deploying new version.")
-    self.server.Send("/api/appversion/deploy", app_id=self.app_id,
+    StatusUpdate('Deploying new version.')
+    self.server.Send('/api/appversion/deploy', app_id=self.app_id,
                      version=self.version)
     self.deployed = True
 
@@ -1143,12 +1174,12 @@
     Returns:
       True if the server returned the app is ready to serve.
     """
-    assert self.deployed, "Deploy() must be called before IsReady()."
+    assert self.deployed, 'Deploy() must be called before IsReady().'
 
-    StatusUpdate("Checking if new version is ready to serve.")
-    result = self.server.Send("/api/appversion/isready", app_id=self.app_id,
+    StatusUpdate('Checking if new version is ready to serve.')
+    result = self.server.Send('/api/appversion/isready', app_id=self.app_id,
                               version=self.version)
-    return result == "1"
+    return result == '1'
 
   def StartServing(self):
     """Start serving with the newly created version.
@@ -1156,10 +1187,10 @@
     Raises:
       Exception: Deploy has not yet been called.
     """
-    assert self.deployed, "Deploy() must be called before IsReady()."
+    assert self.deployed, 'Deploy() must be called before StartServing().'
 
-    StatusUpdate("Closing update: new version is ready to start serving.")
-    self.server.Send("/api/appversion/startserving",
+    StatusUpdate('Closing update: new version is ready to start serving.')
+    self.server.Send('/api/appversion/startserving',
                      app_id=self.app_id, version=self.version)
     self.in_transaction = False
 
@@ -1167,8 +1198,8 @@
     """Rolls back the transaction if one is in progress."""
     if not self.in_transaction:
       return
-    StatusUpdate("Rolling back the update.")
-    self.server.Send("/api/appversion/rollback", app_id=self.app_id,
+    StatusUpdate('Rolling back the update.')
+    self.server.Send('/api/appversion/rollback', app_id=self.app_id,
                      version=self.version)
     self.in_transaction = False
     self.files = {}
@@ -1181,47 +1212,47 @@
       max_size: The maximum size file to upload.
       openfunc: A function that takes a path and returns a file-like object.
     """
-    logging.info("Reading app configuration.")
+    logging.info('Reading app configuration.')
 
-    path = ""
+    path = ''
     try:
-      StatusUpdate("Scanning files on local disk.")
+      StatusUpdate('Scanning files on local disk.')
       num_files = 0
       for path in paths:
         file_handle = openfunc(path)
         try:
           if self.config.skip_files.match(path):
-            logging.info("Ignoring file '%s': File matches ignore regex.",
+            logging.info('Ignoring file \'%s\': File matches ignore regex.',
                          path)
           else:
             file_length = GetFileLength(file_handle)
             if file_length > max_size:
-              logging.error("Ignoring file '%s': Too long "
-                            "(max %d bytes, file is %d bytes)",
+              logging.error('Ignoring file \'%s\': Too long '
+                            '(max %d bytes, file is %d bytes)',
                             path, max_size, file_length)
             else:
-              logging.info("Processing file '%s'", path)
+              logging.info('Processing file \'%s\'', path)
               self.AddFile(path, file_handle)
         finally:
           file_handle.close()
         num_files += 1
         if num_files % 500 == 0:
-          StatusUpdate("Scanned %d files." % num_files)
+          StatusUpdate('Scanned %d files.' % num_files)
     except KeyboardInterrupt:
-      logging.info("User interrupted. Aborting.")
+      logging.info('User interrupted. Aborting.')
       raise
     except EnvironmentError, e:
-      logging.error("An error occurred processing file '%s': %s. Aborting.",
+      logging.error('An error occurred processing file \'%s\': %s. Aborting.',
                     path, e)
       raise
 
     try:
       missing_files = self.Begin()
       if missing_files:
-        StatusUpdate("Uploading %d files." % len(missing_files))
+        StatusUpdate('Uploading %d files.' % len(missing_files))
         num_files = 0
         for missing_file in missing_files:
-          logging.info("Uploading file '%s'" % missing_file)
+          logging.info('Uploading file \'%s\'' % missing_file)
           file_handle = openfunc(missing_file)
           try:
             self.UploadFile(missing_file, file_handle)
@@ -1229,20 +1260,20 @@
             file_handle.close()
           num_files += 1
           if num_files % 500 == 0:
-            StatusUpdate("Uploaded %d files." % num_files)
+            StatusUpdate('Uploaded %d files.' % num_files)
 
       self.Commit()
 
     except KeyboardInterrupt:
-      logging.info("User interrupted. Aborting.")
+      logging.info('User interrupted. Aborting.')
       self.Rollback()
       raise
     except:
-      logging.exception("An unexpected error occurred. Aborting.")
+      logging.exception('An unexpected error occurred. Aborting.')
       self.Rollback()
       raise
 
-    logging.info("Done!")
+    logging.info('Done!')
 
 
 def FileIterator(base, separator=os.path.sep):
@@ -1255,15 +1286,15 @@
   Yields:
     Paths of files found, relative to base.
   """
-  dirs = [""]
+  dirs = ['']
   while dirs:
     current_dir = dirs.pop()
     for entry in os.listdir(os.path.join(base, current_dir)):
       name = os.path.join(current_dir, entry)
       fullname = os.path.join(base, name)
       if os.path.isfile(fullname):
-        if separator == "\\":
-          name = name.replace("\\", "/")
+        if separator == '\\':
+          name = name.replace('\\', '/')
         yield name
       elif os.path.isdir(fullname):
         dirs.append(name)
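
A usage sketch, assuming FileIterator as defined above: walk the current directory and print file paths relative to it (with forward slashes even on Windows):

    import os

    for name in FileIterator(os.getcwd()):
      print name
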
@@ -1302,38 +1333,38 @@
   Returns:
     String containing the 'user-agent' header value, which includes the SDK
     version, the platform information, and the version of Python;
-    e.g., "appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2".
+    e.g., 'appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2'.
   """
   product_tokens = []
 
-  sdk_name = os.environ.get("APPCFG_SDK_NAME")
+  sdk_name = os.environ.get('APPCFG_SDK_NAME')
   if sdk_name:
     product_tokens.append(sdk_name)
   else:
     version = get_version()
     if version is None:
-      release = "unknown"
+      release = 'unknown'
     else:
-      release = version["release"]
+      release = version['release']
 
-    product_tokens.append("appcfg_py/%s" % release)
+    product_tokens.append('appcfg_py/%s' % release)
 
   product_tokens.append(get_platform())
 
-  python_version = ".".join(str(i) for i in sys.version_info)
-  product_tokens.append("Python/%s" % python_version)
+  python_version = '.'.join(str(i) for i in sys.version_info)
+  product_tokens.append('Python/%s' % python_version)
 
-  return " ".join(product_tokens)
+  return ' '.join(product_tokens)
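
One subtlety worth noting: joining all of sys.version_info includes the release level and serial, so the Python token is slightly longer than the docstring's example suggests.

    import sys

    python_version = '.'.join(str(i) for i in sys.version_info)
    print 'Python/%s' % python_version
    # On CPython 2.5.2 this prints 'Python/2.5.2.final.0', not 'Python/2.5.2'
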
 
 
 def GetSourceName(get_version=GetVersionObject):
   """Gets the name of this source version."""
   version = get_version()
   if version is None:
-    release = "unknown"
+    release = 'unknown'
   else:
-    release = version["release"]
-  return "Google-appcfg-%s" % (release,)
+    release = version['release']
+  return 'Google-appcfg-%s' % (release,)
 
 
 class AppCfgApp(object):
@@ -1397,7 +1428,7 @@
     if len(self.args) < 1:
       self._PrintHelpAndExit()
     if self.args[0] not in self.actions:
-      self.parser.error("Unknown action '%s'\n%s" %
+      self.parser.error('Unknown action \'%s\'\n%s' %
                         (self.args[0], self.parser.get_description()))
     action_name = self.args.pop(0)
     self.action = self.actions[action_name]
@@ -1419,17 +1450,20 @@
     """Executes the requested action.
 
     Catches any HTTPErrors raised by the action and prints them to stderr.
+
+    Returns:
+      1 on error, 0 if successful.
     """
     try:
       self.action(self)
     except urllib2.HTTPError, e:
       body = e.read()
-      print >>self.error_fh, ("Error %d: --- begin server output ---\n"
-                              "%s\n--- end server output ---" %
-                              (e.code, body.rstrip("\n")))
+      print >>self.error_fh, ('Error %d: --- begin server output ---\n'
+                              '%s\n--- end server output ---' %
+                              (e.code, body.rstrip('\n')))
       return 1
     except yaml_errors.EventListenerError, e:
-      print >>self.error_fh, ("Error parsing yaml file:\n%s" % e)
+      print >>self.error_fh, ('Error parsing yaml file:\n%s' % e)
       return 1
     return 0
 
@@ -1437,9 +1471,9 @@
     """Returns a formatted string containing the short_descs for all actions."""
     action_names = self.actions.keys()
     action_names.sort()
-    desc = ""
+    desc = ''
     for action_name in action_names:
-      desc += "  %s: %s\n" % (action_name, self.actions[action_name].short_desc)
+      desc += '  %s: %s\n' % (action_name, self.actions[action_name].short_desc)
     return desc
 
   def _GetOptionParser(self):
@@ -1454,43 +1488,43 @@
 
       def format_description(self, description):
         """Very simple formatter."""
-        return description + "\n"
+        return description + '\n'
 
     desc = self._GetActionDescriptions()
-    desc = ("Action must be one of:\n%s"
-            "Use 'help <action>' for a detailed description.") % desc
+    desc = ('Action must be one of:\n%s'
+            'Use \'help <action>\' for a detailed description.') % desc
 
-    parser = self.parser_class(usage="%prog [options] <action>",
+    parser = self.parser_class(usage='%prog [options] <action>',
                                description=desc,
                                formatter=Formatter(),
-                               conflict_handler="resolve")
-    parser.add_option("-h", "--help", action="store_true",
-                      dest="help", help="Show the help message and exit.")
-    parser.add_option("-q", "--quiet", action="store_const", const=0,
-                      dest="verbose", help="Print errors only.")
-    parser.add_option("-v", "--verbose", action="store_const", const=2,
-                      dest="verbose", default=1,
-                      help="Print info level logs.")
-    parser.add_option("--noisy", action="store_const", const=3,
-                      dest="verbose", help="Print all logs.")
-    parser.add_option("-s", "--server", action="store", dest="server",
-                      default="appengine.google.com",
-                      metavar="SERVER", help="The server to connect to.")
-    parser.add_option("--secure", action="store_true", dest="secure",
+                               conflict_handler='resolve')
+    parser.add_option('-h', '--help', action='store_true',
+                      dest='help', help='Show the help message and exit.')
+    parser.add_option('-q', '--quiet', action='store_const', const=0,
+                      dest='verbose', help='Print errors only.')
+    parser.add_option('-v', '--verbose', action='store_const', const=2,
+                      dest='verbose', default=1,
+                      help='Print info level logs.')
+    parser.add_option('--noisy', action='store_const', const=3,
+                      dest='verbose', help='Print all logs.')
+    parser.add_option('-s', '--server', action='store', dest='server',
+                      default='appengine.google.com',
+                      metavar='SERVER', help='The server to connect to.')
+    parser.add_option('--secure', action='store_true', dest='secure',
                       default=False,
-                      help="Use SSL when communicating with the server.")
-    parser.add_option("-e", "--email", action="store", dest="email",
-                      metavar="EMAIL", default=None,
-                      help="The username to use. Will prompt if omitted.")
-    parser.add_option("-H", "--host", action="store", dest="host",
-                      metavar="HOST", default=None,
-                      help="Overrides the Host header sent with all RPCs.")
-    parser.add_option("--no_cookies", action="store_false",
-                      dest="save_cookies", default=True,
-                      help="Do not save authentication cookies to local disk.")
-    parser.add_option("--passin", action="store_true",
-                      dest="passin", default=False,
-                      help="Read the login password from stdin.")
+                      help='Use SSL when communicating with the server.')
+    parser.add_option('-e', '--email', action='store', dest='email',
+                      metavar='EMAIL', default=None,
+                      help='The username to use. Will prompt if omitted.')
+    parser.add_option('-H', '--host', action='store', dest='host',
+                      metavar='HOST', default=None,
+                      help='Overrides the Host header sent with all RPCs.')
+    parser.add_option('--no_cookies', action='store_false',
+                      dest='save_cookies', default=True,
+                      help='Do not save authentication cookies to local disk.')
+    parser.add_option('--passin', action='store_true',
+                      dest='passin', default=False,
+                      help='Read the login password from stdin.')
     return parser
 
   def _MakeSpecificParser(self, action):
@@ -1506,7 +1540,7 @@
     """
     parser = self._GetOptionParser()
     parser.set_usage(action.usage)
-    parser.set_description("%s\n%s" % (action.short_desc, action.long_desc))
+    parser.set_description('%s\n%s' % (action.short_desc, action.long_desc))
     action.options(self, parser)
     options, unused_args = parser.parse_args(self.argv[1:])
     return parser, options
@@ -1531,9 +1565,9 @@
       """Prompts the user for a username and password."""
       email = self.options.email
       if email is None:
-        email = self.raw_input_fn("Email: ")
+        email = self.raw_input_fn('Email: ')
 
-      password_prompt = "Password for %s: " % email
+      password_prompt = 'Password for %s: ' % email
       if self.options.passin:
         password = self.raw_input_fn(password_prompt)
       else:
@@ -1541,14 +1575,14 @@
 
       return (email, password)
 
-    if self.options.host and self.options.host == "localhost":
+    if self.options.host and self.options.host == 'localhost':
       email = self.options.email
       if email is None:
-        email = "test@example.com"
-        logging.info("Using debug user %s.  Override with --email" % email)
+        email = 'test@example.com'
+        logging.info('Using debug user %s.  Override with --email' % email)
       server = self.rpc_server_class(
           self.options.server,
-          lambda: (email, "password"),
+          lambda: (email, 'password'),
           GetUserAgent(),
           GetSourceName(),
           host_override=self.options.host,
@@ -1566,7 +1600,7 @@
                                  host_override=self.options.host,
                                  save_cookies=self.options.save_cookies,
                                  auth_tries=auth_tries,
-                                 account_type="HOSTED_OR_GOOGLE",
+                                 account_type='HOSTED_OR_GOOGLE',
                                  secure=self.options.secure)
 
   def _FindYaml(self, basepath, file_name):
@@ -1580,9 +1614,9 @@
       Path to located yaml file if one exists, else None.
     """
     if not os.path.isdir(basepath):
-      self.parser.error("Not a directory: %s" % basepath)
+      self.parser.error('Not a directory: %s' % basepath)
 
-    for yaml_file in (file_name + ".yaml", file_name + ".yml"):
+    for yaml_file in (file_name + '.yaml', file_name + '.yml'):
       yaml_path = os.path.join(basepath, yaml_file)
       if os.path.isfile(yaml_path):
         return yaml_path
@@ -1598,18 +1632,39 @@
     Returns:
       An AppInfoExternal object.
     """
-    appyaml_filename = self._FindYaml(basepath, "app")
+    appyaml_filename = self._FindYaml(basepath, 'app')
     if appyaml_filename is None:
-      self.parser.error("Directory does not contain an app.yaml "
-                        "configuration file.")
+      self.parser.error('Directory does not contain an app.yaml '
+                        'configuration file.')
 
-    fh = open(appyaml_filename, "r")
+    fh = open(appyaml_filename, 'r')
     try:
       appyaml = appinfo.LoadSingleAppInfo(fh)
     finally:
       fh.close()
     return appyaml
 
+  def _ParseYamlFile(self, basepath, basename, parser):
+    """Parses a yaml file.
+
+    Args:
+      basepath: the directory of the application.
+      basename: the base name of the file (with the '.yaml' stripped off).
+      parser: the function or method used to parse the file.
+
+    Returns:
+      A single parsed yaml file or None if the file does not exist.
+    """
+    file_name = self._FindYaml(basepath, basename)
+    if file_name is not None:
+      fh = open(file_name, 'r')
+      try:
+        defns = parser(fh)
+      finally:
+        fh.close()
+      return defns
+    return None
+
   def _ParseIndexYaml(self, basepath):
     """Parses the index.yaml file.
 
@@ -1619,15 +1674,8 @@
     Returns:
       A single parsed yaml file or None if the file does not exist.
     """
-    file_name = self._FindYaml(basepath, "index")
-    if file_name is not None:
-      fh = open(file_name, "r")
-      try:
-        index_defs = datastore_index.ParseIndexDefinitions(fh)
-      finally:
-        fh.close()
-      return index_defs
-    return None
+    return self._ParseYamlFile(basepath, 'index',
+                               datastore_index.ParseIndexDefinitions)
 
   def _ParseCronYaml(self, basepath):
     """Parses the cron.yaml file.
@@ -1636,17 +1684,20 @@
       basepath: the directory of the application.
 
     Returns:
-      A CronInfoExternal object.
+      A CronInfoExternal object or None if the file does not exist.
     """
-    file_name = self._FindYaml(basepath, "cron")
-    if file_name is not None:
-      fh = open(file_name, "r")
-      try:
-        cron_info = croninfo.LoadSingleCron(fh)
-      finally:
-        fh.close()
-      return cron_info
-    return None
+    return self._ParseYamlFile(basepath, 'cron', croninfo.LoadSingleCron)
+
+  def _ParseQueueYaml(self, basepath):
+    """Parses the queue.yaml file.
+
+    Args:
+      basepath: the directory of the application.
+
+    Returns:
+      A QueueInfoExternal object or None if the file does not exist.
+    """
+    return self._ParseYamlFile(basepath, 'queue', queueinfo.LoadSingleQueue)
 
   def Help(self):
     """Prints help for a specific action.
@@ -1655,7 +1706,7 @@
     Exits the program after printing the help message.
     """
     if len(self.args) != 1 or self.args[0] not in self.actions:
-      self.parser.error("Expected a single action argument. Must be one of:\n" +
+      self.parser.error('Expected a single action argument. Must be one of:\n' +
                         self._GetActionDescriptions())
 
     action = self.actions[self.args[0]]
@@ -1665,7 +1716,7 @@
   def Update(self):
     """Updates and deploys a new appversion."""
     if len(self.args) != 1:
-      self.parser.error("Expected a single <directory> argument.")
+      self.parser.error('Expected a single <directory> argument.')
 
     basepath = self.args[0]
     appyaml = self._ParseAppYaml(basepath)
@@ -1676,7 +1727,7 @@
 
     appversion = AppVersionUpload(rpc_server, appyaml)
     appversion.DoUpload(FileIterator(basepath), self.options.max_size,
-                        lambda path: open(os.path.join(basepath, path), "rb"))
+                        lambda path: open(os.path.join(basepath, path), 'rb'))
 
     index_defs = self._ParseIndexYaml(basepath)
     if index_defs:
@@ -1684,32 +1735,37 @@
       try:
         index_upload.DoUpload()
       except urllib2.HTTPError, e:
-        StatusUpdate("Error %d: --- begin server output ---\n"
-                     "%s\n--- end server output ---" %
-                     (e.code, e.read().rstrip("\n")))
+        StatusUpdate('Error %d: --- begin server output ---\n'
+                     '%s\n--- end server output ---' %
+                     (e.code, e.read().rstrip('\n')))
         print >> self.error_fh, (
-            "Your app was updated, but there was an error updating your "
-            "indexes. Please retry later with appcfg.py update_indexes.")
+            'Your app was updated, but there was an error updating your '
+            'indexes. Please retry later with appcfg.py update_indexes.')
 
     cron_entries = self._ParseCronYaml(basepath)
     if cron_entries:
       cron_upload = CronEntryUpload(rpc_server, appyaml, cron_entries)
       cron_upload.DoUpload()
 
+    queue_entries = self._ParseQueueYaml(basepath)
+    if queue_entries:
+      queue_upload = QueueEntryUpload(rpc_server, appyaml, queue_entries)
+      queue_upload.DoUpload()
+
   def _UpdateOptions(self, parser):
     """Adds update-specific options to 'parser'.
 
     Args:
       parser: An instance of OptionsParser.
     """
-    parser.add_option("-S", "--max_size", type="int", dest="max_size",
-                      default=10485760, metavar="SIZE",
-                      help="Maximum size of a file to upload.")
+    parser.add_option('-S', '--max_size', type='int', dest='max_size',
+                      default=10485760, metavar='SIZE',
+                      help='Maximum size of a file to upload.')
 
   def VacuumIndexes(self):
     """Deletes unused indexes."""
     if len(self.args) != 1:
-      self.parser.error("Expected a single <directory> argument.")
+      self.parser.error('Expected a single <directory> argument.')
 
     basepath = self.args[0]
     config = self._ParseAppYaml(basepath)
@@ -1730,14 +1786,14 @@
     Args:
       parser: An instance of OptionsParser.
     """
-    parser.add_option("-f", "--force", action="store_true", dest="force_delete",
+    parser.add_option('-f', '--force', action='store_true', dest='force_delete',
                       default=False,
-                      help="Force deletion without being prompted.")
+                      help='Force deletion without being prompted.')
 
   def UpdateCron(self):
     """Updates any new or changed cron definitions."""
     if len(self.args) != 1:
-      self.parser.error("Expected a single <directory> argument.")
+      self.parser.error('Expected a single <directory> argument.')
 
     basepath = self.args[0]
     appyaml = self._ParseAppYaml(basepath)
@@ -1751,7 +1807,7 @@
   def UpdateIndexes(self):
     """Updates indexes."""
     if len(self.args) != 1:
-      self.parser.error("Expected a single <directory> argument.")
+      self.parser.error('Expected a single <directory> argument.')
 
     basepath = self.args[0]
     appyaml = self._ParseAppYaml(basepath)
@@ -1762,10 +1818,24 @@
       index_upload = IndexDefinitionUpload(rpc_server, appyaml, index_defs)
       index_upload.DoUpload()
 
+  def UpdateQueues(self):
+    """Updates any new or changed task queue definitions."""
+    if len(self.args) != 1:
+      self.parser.error('Expected a single <directory> argument.')
+
+    basepath = self.args[0]
+    appyaml = self._ParseAppYaml(basepath)
+    rpc_server = self._GetRpcServer()
+
+    queue_entries = self._ParseQueueYaml(basepath)
+    if queue_entries:
+      queue_upload = QueueEntryUpload(rpc_server, appyaml, queue_entries)
+      queue_upload.DoUpload()
+
   def Rollback(self):
     """Does a rollback of any existing transaction for this app version."""
     if len(self.args) != 1:
-      self.parser.error("Expected a single <directory> argument.")
+      self.parser.error('Expected a single <directory> argument.')
 
     basepath = self.args[0]
     appyaml = self._ParseAppYaml(basepath)
@@ -1778,11 +1848,11 @@
     """Write request logs to a file."""
     if len(self.args) != 2:
       self.parser.error(
-          "Expected a <directory> argument and an <output_file> argument.")
+          'Expected a <directory> argument and an <output_file> argument.')
     if (self.options.severity is not None and
         not 0 <= self.options.severity <= MAX_LOG_LEVEL):
       self.parser.error(
-          "Severity range is 0 (DEBUG) through %s (CRITICAL)." % MAX_LOG_LEVEL)
+          'Severity range is 0 (DEBUG) through %s (CRITICAL).' % MAX_LOG_LEVEL)
 
     if self.options.num_days is None:
       self.options.num_days = int(not self.options.append)
@@ -1794,7 +1864,8 @@
                                    self.options.append,
                                    self.options.severity,
                                    time.time(),
-                                   self.options.vhost)
+                                   self.options.vhost,
+                                   self.options.include_vhost)
     logs_requester.DownloadLogs()
 
   def _RequestLogsOptions(self, parser):
@@ -1803,25 +1874,28 @@
     Args:
       parser: An instance of OptionsParser.
     """
-    parser.add_option("-n", "--num_days", type="int", dest="num_days",
-                      action="store", default=None,
-                      help="Number of days worth of log data to get. "
-                      "The cut-off point is midnight UTC. "
-                      "Use 0 to get all available logs. "
-                      "Default is 1, unless --append is also given; "
-                      "then the default is 0.")
-    parser.add_option("-a", "--append", dest="append",
-                      action="store_true", default=False,
-                      help="Append to existing file.")
-    parser.add_option("--severity", type="int", dest="severity",
-                      action="store", default=None,
-                      help="Severity of app-level log messages to get. "
-                      "The range is 0 (DEBUG) through 4 (CRITICAL). "
-                      "If omitted, only request logs are returned.")
-    parser.add_option("--vhost", type="string", dest="vhost",
-                      action="store", default=None,
-                      help="The virtual host of log messages to get. "
-                      "If omitted, all log messages are returned.")
+    parser.add_option('-n', '--num_days', type='int', dest='num_days',
+                      action='store', default=None,
+                      help='Number of days worth of log data to get. '
+                      'The cut-off point is midnight UTC. '
+                      'Use 0 to get all available logs. '
+                      'Default is 1, unless --append is also given; '
+                      'then the default is 0.')
+    parser.add_option('-a', '--append', dest='append',
+                      action='store_true', default=False,
+                      help='Append to existing file.')
+    parser.add_option('--severity', type='int', dest='severity',
+                      action='store', default=None,
+                      help='Severity of app-level log messages to get. '
+                      'The range is 0 (DEBUG) through 4 (CRITICAL). '
+                      'If omitted, only request logs are returned.')
+    parser.add_option('--vhost', type='string', dest='vhost',
+                      action='store', default=None,
+                      help='The virtual host of log messages to get. '
+                      'If omitted, all log messages are returned.')
+    parser.add_option('--include_vhost', dest='include_vhost',
+                      action='store_true', default=False,
+                      help='Include virtual host in log messages.')
 
   def CronInfo(self, now=None, output=sys.stdout):
     """Displays information about cron definitions.
@@ -1831,7 +1905,7 @@
       output: Used for testing.
     """
     if len(self.args) != 1:
-      self.parser.error("Expected a single <directory> argument.")
+      self.parser.error('Expected a single <directory> argument.')
     if now is None:
       now = datetime.datetime.now()
 
@@ -1841,15 +1915,15 @@
       for entry in cron_entries.cron:
         description = entry.description
         if not description:
-          description = "<no description>"
-        print >>output, "\n%s:\nURL: %s\nSchedule: %s" % (description,
+          description = '<no description>'
+        print >>output, '\n%s:\nURL: %s\nSchedule: %s' % (description,
                                                           entry.url,
                                                           entry.schedule)
         schedule = groctimespecification.GrocTimeSpecification(entry.schedule)
         matches = schedule.GetMatches(now, self.options.num_runs)
         for match in matches:
-          print >>output, "%s, %s from now" % (
-              match.strftime("%Y-%m-%d %H:%M:%S"), match - now)
+          print >>output, '%s, %s from now' % (
+              match.strftime('%Y-%m-%d %H:%M:%S'), match - now)
 
   def _CronInfoOptions(self, parser):
     """Adds cron_info-specific options to 'parser'.
@@ -1857,20 +1931,20 @@
     Args:
       parser: An instance of OptionsParser.
     """
-    parser.add_option("-n", "--num_runs", type="int", dest="num_runs",
-                      action="store", default=5,
-                      help="Number of runs of each cron job to display"
-                      "Default is 5")
+    parser.add_option('-n', '--num_runs', type='int', dest='num_runs',
+                      action='store', default=5,
+                      help='Number of runs of each cron job to display. '
+                      'Default is 5.')
 
   def _CheckRequiredLoadOptions(self):
     """Checks that upload/download options are present."""
-    for option in ["filename", "kind", "config_file"]:
+    for option in ['filename', 'kind', 'config_file']:
       if getattr(self.options, option) is None:
-        self.parser.error("Option '%s' is required." % option)
+        self.parser.error('Option \'%s\' is required.' % option)
     if not self.options.url:
-      self.parser.error("You must have google.appengine.ext.remote_api.handler "
-                        "assigned to an endpoint in app.yaml, or provide "
-                        "the url of the handler via the 'url' option.")
+      self.parser.error('You must have google.appengine.ext.remote_api.handler '
+                        'assigned to an endpoint in app.yaml, or provide '
+                        'the url of the handler via the \'url\' option.')
 
   def InferRemoteApiUrl(self, appyaml):
     """Uses app.yaml to determine the remote_api endpoint.
@@ -1882,16 +1956,16 @@
       The url of the remote_api endpoint as a string, or None
     """
     handlers = appyaml.handlers
-    handler_suffix = "remote_api/handler.py"
+    handler_suffix = 'remote_api/handler.py'
     app_id = appyaml.application
     for handler in handlers:
-      if hasattr(handler, "script") and handler.script:
+      if hasattr(handler, 'script') and handler.script:
         if handler.script.endswith(handler_suffix):
           server = self.options.server
-          if server == "appengine.google.com":
-            return "http://%s.appspot.com%s" % (app_id, handler.url)
+          if server == 'appengine.google.com':
+            return 'http://%s.appspot.com%s' % (app_id, handler.url)
           else:
-            return "http://%s%s" % (server, handler.url)
+            return 'http://%s%s' % (server, handler.url)
     return None
 
   def RunBulkloader(self, arg_dict):
@@ -1903,8 +1977,8 @@
     try:
       import sqlite3
     except ImportError:
-      logging.error("upload_data action requires SQLite3 and the python "
-                    "sqlite3 module (included in python since 2.5).")
+      logging.error('upload_data action requires SQLite3 and the python '
+                    'sqlite3 module (included in python since 2.5).')
       sys.exit(1)
 
     sys.exit(bulkloader.Run(arg_dict))
@@ -1912,7 +1986,7 @@
   def _SetupLoad(self):
     """Performs common verification and set up for upload and download."""
     if len(self.args) != 1:
-      self.parser.error("Expected <directory> argument.")
+      self.parser.error('Expected <directory> argument.')
 
     basepath = self.args[0]
     appyaml = self._ParseAppYaml(basepath)
@@ -1927,7 +2001,7 @@
     self._CheckRequiredLoadOptions()
 
     if self.options.batch_size < 1:
-      self.parser.error("batch_size must be 1 or larger.")
+      self.parser.error('batch_size must be 1 or larger.')
 
     if verbosity == 1:
       logging.getLogger().setLevel(logging.INFO)
@@ -1939,27 +2013,27 @@
   def _MakeLoaderArgs(self):
     return dict([(arg_name, getattr(self.options, arg_name, None)) for
                  arg_name in (
-        "app_id",
-        "url",
-        "filename",
-        "batch_size",
-        "kind",
-        "num_threads",
-        "bandwidth_limit",
-        "rps_limit",
-        "http_limit",
-        "db_filename",
-        "config_file",
-        "auth_domain",
-        "has_header",
-        "loader_opts",
-        "log_file",
-        "passin",
-        "email",
-        "debug",
-        "exporter_opts",
-        "result_db_filename",
-        )])
+                     'app_id',
+                     'url',
+                     'filename',
+                     'batch_size',
+                     'kind',
+                     'num_threads',
+                     'bandwidth_limit',
+                     'rps_limit',
+                     'http_limit',
+                     'db_filename',
+                     'config_file',
+                     'auth_domain',
+                     'has_header',
+                     'loader_opts',
+                     'log_file',
+                     'passin',
+                     'email',
+                     'debug',
+                     'exporter_opts',
+                     'result_db_filename',
+                     )])
 
   def PerformDownload(self, run_fn=None):
     """Performs a datastore download via the bulkloader.
@@ -1971,7 +2045,7 @@
       run_fn = self.RunBulkloader
     self._SetupLoad()
 
-    StatusUpdate("Downloading data records.")
+    StatusUpdate('Downloading data records.')
 
     args = self._MakeLoaderArgs()
     args['download'] = True
@@ -1989,7 +2063,7 @@
       run_fn = self.RunBulkloader
     self._SetupLoad()
 
-    StatusUpdate("Uploading data records.")
+    StatusUpdate('Uploading data records.')
 
     args = self._MakeLoaderArgs()
     args['download'] = False
@@ -2002,44 +2076,44 @@
     Args:
       parser: An instance of OptionsParser.
     """
-    parser.add_option("--filename", type="string", dest="filename",
-                      action="store",
-                      help="The name of the file containing the input data."
-                      " (Required)")
-    parser.add_option("--config_file", type="string", dest="config_file",
-                      action="store",
-                      help="Name of the configuration file. (Required)")
-    parser.add_option("--kind", type="string", dest="kind",
-                      action="store",
-                      help="The kind of the entities to store. (Required)")
-    parser.add_option("--url", type="string", dest="url",
-                      action="store",
-                      help="The location of the remote_api endpoint.")
-    parser.add_option("--num_threads", type="int", dest="num_threads",
-                      action="store", default=10,
-                      help="Number of threads to upload records with.")
-    parser.add_option("--batch_size", type="int", dest="batch_size",
-                      action="store", default=10,
-                      help="Number of records to post in each request.")
-    parser.add_option("--bandwidth_limit", type="int", dest="bandwidth_limit",
-                      action="store", default=250000,
-                      help="The maximum bytes/second bandwidth for transfers.")
-    parser.add_option("--rps_limit", type="int", dest="rps_limit",
-                      action="store", default=20,
-                      help="The maximum records/second for transfers.")
-    parser.add_option("--http_limit", type="int", dest="http_limit",
-                      action="store", default=8,
-                      help="The maximum requests/second for transfers.")
-    parser.add_option("--db_filename", type="string", dest="db_filename",
-                      action="store",
-                      help="Name of the progress database file.")
-    parser.add_option("--auth_domain", type="string", dest="auth_domain",
-                      action="store", default="gmail.com",
-                      help="The name of the authorization domain to use.")
-    parser.add_option("--log_file", type="string", dest="log_file",
-                      help="File to write bulkloader logs.  If not supplied "
-                           "then a new log file will be created, named: "
-                           "bulkloader-log-TIMESTAMP.")
+    parser.add_option('--filename', type='string', dest='filename',
+                      action='store',
+                      help='The name of the file containing the input data.'
+                      ' (Required)')
+    parser.add_option('--config_file', type='string', dest='config_file',
+                      action='store',
+                      help='Name of the configuration file. (Required)')
+    parser.add_option('--kind', type='string', dest='kind',
+                      action='store',
+                      help='The kind of the entities to store. (Required)')
+    parser.add_option('--url', type='string', dest='url',
+                      action='store',
+                      help='The location of the remote_api endpoint.')
+    parser.add_option('--num_threads', type='int', dest='num_threads',
+                      action='store', default=10,
+                      help='Number of threads to upload records with.')
+    parser.add_option('--batch_size', type='int', dest='batch_size',
+                      action='store', default=10,
+                      help='Number of records to post in each request.')
+    parser.add_option('--bandwidth_limit', type='int', dest='bandwidth_limit',
+                      action='store', default=250000,
+                      help='The maximum bytes/second bandwidth for transfers.')
+    parser.add_option('--rps_limit', type='int', dest='rps_limit',
+                      action='store', default=20,
+                      help='The maximum records/second for transfers.')
+    parser.add_option('--http_limit', type='int', dest='http_limit',
+                      action='store', default=8,
+                      help='The maximum requests/second for transfers.')
+    parser.add_option('--db_filename', type='string', dest='db_filename',
+                      action='store',
+                      help='Name of the progress database file.')
+    parser.add_option('--auth_domain', type='string', dest='auth_domain',
+                      action='store', default='gmail.com',
+                      help='The name of the authorization domain to use.')
+    parser.add_option('--log_file', type='string', dest='log_file',
+                      help='File to write bulkloader logs.  If not supplied '
+                      'then a new log file will be created, named: '
+                      'bulkloader-log-TIMESTAMP.')
 
   def _PerformUploadOptions(self, parser):
     """Adds 'upload_data' specific options to the 'parser' passed in.
@@ -2048,12 +2122,12 @@
       parser: An instance of OptionsParser.
     """
     self._PerformLoadOptions(parser)
-    parser.add_option("--has_header", dest="has_header",
-                      action="store_true", default=False,
-                      help="Whether the first line of the input file should be"
-                      " skipped")
-    parser.add_option("--loader_opts", type="string", dest="loader_opts",
-                      help="A string to pass to the Loader.initialize method.")
+    parser.add_option('--has_header', dest='has_header',
+                      action='store_true', default=False,
+                      help='Whether the first line of the input file should be'
+                      ' skipped')
+    parser.add_option('--loader_opts', type='string', dest='loader_opts',
+                      help='A string to pass to the Loader.initialize method.')
 
   def _PerformDownloadOptions(self, parser):
     """Adds 'download_data' specific options to the 'parser' passed in.
@@ -2062,13 +2136,13 @@
       parser: An instance of OptionsParser.
     """
     self._PerformLoadOptions(parser)
-    parser.add_option("--exporter_opts", type="string", dest="exporter_opts",
-                      help="A string to pass to the Exporter.initialize method."
-                      )
-    parser.add_option("--result_db_filename", type="string",
-                      dest="result_db_filename",
-                      action="store",
-                      help="Database to write entities to for download.")
+    parser.add_option('--exporter_opts', type='string', dest='exporter_opts',
+                      help='A string to pass to the Exporter.initialize method.'
+                     )
+    parser.add_option('--result_db_filename', type='string',
+                      dest='result_db_filename',
+                      action='store',
+                      help='Database to write entities to for download.')
 
   class Action(object):
     """Contains information about a command line action.
@@ -2084,7 +2158,7 @@
         object.
     """
 
-    def __init__(self, function, usage, short_desc, long_desc="",
+    def __init__(self, function, usage, short_desc, long_desc='',
                  options=lambda obj, parser: None):
       """Initializer for the class attributes."""
       self.function = function
@@ -2097,22 +2171,28 @@
       """Invoke this Action on the specified AppCfg.
 
       This calls the function of the appropriate name on AppCfg, and
-      respects polymophic overrides."""
+      respects polymorphic overrides.
+
+      Args:
+        appcfg: The appcfg to use.
+      Returns:
+        The result of the function call.
+      """
       method = getattr(appcfg, self.function)
       return method()
 
   actions = {
 
-      "help": Action(
-          function="Help",
-          usage="%prog help <action>",
-          short_desc="Print help for a specific action."),
+      'help': Action(
+          function='Help',
+          usage='%prog help <action>',
+          short_desc='Print help for a specific action.'),
 
-      "update": Action(
-          function="Update",
-          usage="%prog [options] update <directory>",
+      'update': Action(
+          function='Update',
+          usage='%prog [options] update <directory>',
           options=_UpdateOptions,
-          short_desc="Create or update an app version.",
+          short_desc='Create or update an app version.',
           long_desc="""
 Specify a directory that contains all of the files required by
 the app, and appcfg.py will create/update the app version referenced
@@ -2120,27 +2200,35 @@
 will follow symlinks and recursively upload all files to the server.
 Temporary or source control files (e.g. foo~, .svn/*) will be skipped."""),
 
-      "update_cron": Action(
-          function="UpdateCron",
-          usage="%prog [options] update_cron <directory>",
-          short_desc="Update application cron definitions.",
+      'update_cron': Action(
+          function='UpdateCron',
+          usage='%prog [options] update_cron <directory>',
+          short_desc='Update application cron definitions.',
           long_desc="""
 The 'update_cron' command will update any new, removed or changed cron
-definitions from the cron.yaml file."""),
+definitions from the optional cron.yaml file."""),
 
-      "update_indexes": Action(
-          function="UpdateIndexes",
-          usage="%prog [options] update_indexes <directory>",
-          short_desc="Update application indexes.",
+      'update_indexes': Action(
+          function='UpdateIndexes',
+          usage='%prog [options] update_indexes <directory>',
+          short_desc='Update application indexes.',
           long_desc="""
 The 'update_indexes' command will add additional indexes which are not currently
 in production as well as restart any indexes that were not completed."""),
 
-      "vacuum_indexes": Action(
-          function="VacuumIndexes",
-          usage="%prog [options] vacuum_indexes <directory>",
+      'update_queues': Action(
+          function='UpdateQueues',
+          usage='%prog [options] update_queues <directory>',
+          short_desc='Update application task queue definitions.',
+          long_desc="""
+The 'update_queues' command will update any new, removed or changed task queue
+definitions from the optional queue.yaml file."""),
+
+      'vacuum_indexes': Action(
+          function='VacuumIndexes',
+          usage='%prog [options] vacuum_indexes <directory>',
           options=_VacuumIndexesOptions,
-          short_desc="Delete unused indexes from application.",
+          short_desc='Delete unused indexes from application.',
           long_desc="""
 The 'vacuum_indexes' command will help clean up indexes which are no longer
 in use.  It does this by comparing the local index configuration with
@@ -2148,48 +2236,48 @@
 server do not exist in the index configuration file, the user is given the
 option to delete them."""),
 
-      "rollback": Action(
-          function="Rollback",
-          usage="%prog [options] rollback <directory>",
-          short_desc="Rollback an in-progress update.",
+      'rollback': Action(
+          function='Rollback',
+          usage='%prog [options] rollback <directory>',
+          short_desc='Rollback an in-progress update.',
           long_desc="""
 The 'update' command requires a server-side transaction.  Use 'rollback'
 if you get an error message about another transaction being in progress
 and you are sure that there is no such transaction."""),
 
-      "request_logs": Action(
-          function="RequestLogs",
-          usage="%prog [options] request_logs <directory> <output_file>",
+      'request_logs': Action(
+          function='RequestLogs',
+          usage='%prog [options] request_logs <directory> <output_file>',
           options=_RequestLogsOptions,
-          short_desc="Write request logs in Apache common log format.",
+          short_desc='Write request logs in Apache common log format.',
           long_desc="""
 The 'request_logs' command exports the request logs from your application
 to a file.  It will write Apache common log format records ordered
 chronologically.  If the output file is '-', the logs are written to stdout."""),
 
-      "cron_info": Action(
-          function="CronInfo",
-          usage="%prog [options] cron_info <directory>",
+      'cron_info': Action(
+          function='CronInfo',
+          usage='%prog [options] cron_info <directory>',
           options=_CronInfoOptions,
-          short_desc="Display information about cron jobs.",
+          short_desc='Display information about cron jobs.',
           long_desc="""
 The 'cron_info' command will display the next 'number' runs (default 5) for
 each cron job defined in the cron.yaml file."""),
 
-      "upload_data": Action(
-          function="PerformUpload",
-          usage="%prog [options] upload_data <directory>",
+      'upload_data': Action(
+          function='PerformUpload',
+          usage='%prog [options] upload_data <directory>',
           options=_PerformUploadOptions,
-          short_desc="Upload data records to datastore.",
+          short_desc='Upload data records to datastore.',
           long_desc="""
 The 'upload_data' command translates input records into datastore entities and
 uploads them into your application's datastore."""),
 
-      "download_data": Action(
-          function="PerformDownload",
-          usage="%prog [options] download_data <directory>",
+      'download_data': Action(
+          function='PerformDownload',
+          usage='%prog [options] download_data <directory>',
           options=_PerformDownloadOptions,
-          short_desc="Download entities from datastore.",
+          short_desc='Download entities from datastore.',
           long_desc="""
 The 'download_data' command downloads datastore entities and writes them to
 a file as CSV or in a developer-defined format."""),
@@ -2200,16 +2288,16 @@
 
 
 def main(argv):
-  logging.basicConfig(format=("%(asctime)s %(levelname)s %(filename)s:"
-                              "%(lineno)s %(message)s "))
+  logging.basicConfig(format=('%(asctime)s %(levelname)s %(filename)s:'
+                              '%(lineno)s %(message)s '))
   try:
     result = AppCfgApp(argv).Run()
     if result:
       sys.exit(result)
   except KeyboardInterrupt:
-    StatusUpdate("Interrupted.")
+    StatusUpdate('Interrupted.')
     sys.exit(1)
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
   main(sys.argv)
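
The actions table above dispatches through Action.__call__, which looks the
handler up by name with getattr so that AppCfg subclasses can override any
command. A minimal sketch of that dispatch pattern (the class names here are
invented for illustration, not taken from appcfg.py):

  class BaseCfg(object):
    def Help(self):
      return 'base help'

  class CustomCfg(BaseCfg):
    def Help(self):
      # Polymorphic override: this is the method getattr() will find.
      return 'custom help'

  def invoke(appcfg, function):
    method = getattr(appcfg, function)  # late binding by method name
    return method()

  print invoke(CustomCfg(), 'Help')  # prints 'custom help'
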
--- a/thirdparty/google_appengine/google/appengine/tools/bulkloader.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/tools/bulkloader.py	Fri Jun 19 16:13:32 2009 +0200
@@ -3493,7 +3493,8 @@
 def InterruptibleQueueJoin(queue,
                            thread_local,
                            thread_gate,
-                           queue_join_thread_factory=QueueJoinThread):
+                           queue_join_thread_factory=QueueJoinThread,
+                           check_workers=True):
   """Repeatedly joins the given ReQueue or Queue.Queue with short timeout.
 
   Between each timeout on the join, worker threads are checked.
@@ -3503,6 +3504,7 @@
     thread_local: A threading.local instance which indicates interrupts.
     thread_gate: A ThreadGate instance.
     queue_join_thread_factory: Used for dependency injection.
+    check_workers: Whether to interrupt the join on worker death.
 
   Returns:
     True unless the queue join is interrupted by SIGINT or worker death.
@@ -3516,9 +3518,10 @@
     if thread_local.shut_down:
       logger.debug('Queue join interrupted')
       return False
-    for worker_thread in thread_gate.Threads():
-      if not worker_thread.isAlive():
-        return False
+    if check_workers:
+      for worker_thread in thread_gate.Threads():
+        if not worker_thread.isAlive():
+          return False
 
 
 def ShutdownThreads(data_source_thread, work_queue, thread_gate):
@@ -3732,7 +3735,8 @@
       thread.CheckError()
 
     if self.progress_thread.isAlive():
-      _Join(progress_queue, 'progress_queue to finish')
+      InterruptibleQueueJoin(progress_queue, thread_local, thread_gate,
+                             check_workers=False)
     else:
       logger.warn('Progress thread exited prematurely')
 
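The check_workers flag added above exists for this call: by the time the
progress queue is being drained, the worker threads have already exited, so
their death must not abort the join. A standalone sketch of the guard
(simplified names; the real loop joins with a short timeout via
QueueJoinThread):

  def interruptible_join(queue_empty, shut_down, workers, check_workers=True):
    while not queue_empty():
      if shut_down():
        return False  # interrupted by SIGINT
      if check_workers:
        for worker in workers:
          if not worker.isAlive():
            return False  # a worker died; stop waiting on the queue
    return True
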
@@ -4004,7 +4008,7 @@
 
 1. At the top of the file, add this:
 
-from google.appengine.tools import bulkloader.Loader
+from google.appengine.tools.bulkloader import Loader
 
 2. For each of your Loader subclasses add the following at the end of the
    __init__ definition:
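
The corrected import above is the one this migration note needs (the rest of
the note is cut off by the hunk). For context, a Loader subclass written
against that import might look like the following; the kind name and property
converters are invented for illustration, assuming the (kind, properties)
constructor that bulkloader.Loader takes:

  from google.appengine.tools.bulkloader import Loader

  class AlbumLoader(Loader):
    def __init__(self):
      Loader.__init__(self, 'Album',
                      [('title', str),
                       ('release_year', int)])
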
--- a/thirdparty/google_appengine/google/appengine/tools/dev_appserver.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/tools/dev_appserver.py	Fri Jun 19 16:13:32 2009 +0200
@@ -36,6 +36,7 @@
 import __builtin__
 import BaseHTTPServer
 import Cookie
+import base64
 import cStringIO
 import cgi
 import cgitb
@@ -88,6 +89,7 @@
 from google.appengine.api import user_service_stub
 from google.appengine.api import yaml_errors
 from google.appengine.api.capabilities import capability_stub
+from google.appengine.api.labs.taskqueue import taskqueue_stub
 from google.appengine.api.memcache import memcache_stub
 
 from google.appengine import dist
@@ -128,6 +130,9 @@
 
 API_VERSION = '1'
 
+SITE_PACKAGES = os.path.normcase(os.path.join(os.path.dirname(os.__file__),
+                                              'site-packages'))
+
 
 class Error(Exception):
   """Base-class for exceptions in this module."""
@@ -517,9 +522,11 @@
 _IGNORE_REQUEST_HEADERS = frozenset(['content-type', 'content-length',
                                      'accept-encoding', 'transfer-encoding'])
 
+
 def SetupEnvironment(cgi_path,
                      relative_url,
                      headers,
+                     infile,
                      split_url=SplitURL,
                      get_user_info=dev_appserver_login.GetUserInfo):
   """Sets up environment variables for a CGI.
@@ -528,6 +535,7 @@
     cgi_path: Full file-system path to the CGI being executed.
     relative_url: Relative URL used to access the CGI.
     headers: Instance of mimetools.Message containing request headers.
+    infile: File-like object with input data from the request.
     split_url, get_user_info: Used for dependency injection.
 
   Returns:
@@ -558,6 +566,16 @@
     adjusted_name = key.replace('-', '_').upper()
     env['HTTP_' + adjusted_name] = ', '.join(headers.getheaders(key))
 
+  PAYLOAD_HEADER = 'HTTP_X_APPENGINE_DEVELOPMENT_PAYLOAD'
+  if PAYLOAD_HEADER in env:
+    del env[PAYLOAD_HEADER]
+    new_data = base64.standard_b64decode(infile.getvalue())
+    infile.seek(0)
+    infile.truncate()
+    infile.write(new_data)
+    infile.seek(0)
+    env['CONTENT_LENGTH'] = str(len(new_data))
+
   return env
 
 
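The new infile argument supports the block above: when the development payload
header is present, the request body arrives base64-encoded (presumably so task
queue payloads survive the dev-server transport), and SetupEnvironment restores
the raw bytes plus a matching CONTENT_LENGTH before the CGI runs. The decode
step in isolation (the payload here is made up):

  import base64
  import cStringIO

  infile = cStringIO.StringIO(base64.standard_b64encode('raw task body'))
  new_data = base64.standard_b64decode(infile.getvalue())
  infile.seek(0)
  infile.truncate()
  infile.write(new_data)
  infile.seek(0)
  assert infile.read() == 'raw task body'
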
@@ -805,13 +823,11 @@
 
 
 
-    os.path.normcase(os.path.join(os.path.dirname(os.__file__),
-                                  'site-packages'))
+    SITE_PACKAGES,
   ])
 
   ALLOWED_SITE_PACKAGE_DIRS = set(
-    os.path.normcase(os.path.abspath(os.path.join(
-      os.path.dirname(os.__file__), 'site-packages', path)))
+    os.path.normcase(os.path.abspath(os.path.join(SITE_PACKAGES, path)))
     for path in [
 
   ])
@@ -905,6 +921,31 @@
     FakeFile._availability_cache = {}
 
   @staticmethod
+  def SetAllowedModule(name):
+    """Allow the use of a module based on where it is located.
+
+    Meant to be used by use_library() so that it has a link back into the
+    trusted part of the interpreter.
+
+    Args:
+      name: Name of the module to allow.
+    """
+    stream, pathname, description = imp.find_module(name)
+    pathname = os.path.normcase(os.path.abspath(pathname))
+    if stream:
+      stream.close()
+      FakeFile.ALLOWED_FILES.add(pathname)
+      FakeFile.ALLOWED_FILES.add(os.path.realpath(pathname))
+    else:
+      assert description[2] == imp.PKG_DIRECTORY
+      if pathname.startswith(SITE_PACKAGES):
+        FakeFile.ALLOWED_SITE_PACKAGE_DIRS.add(pathname)
+        FakeFile.ALLOWED_SITE_PACKAGE_DIRS.add(os.path.realpath(pathname))
+      else:
+        FakeFile.ALLOWED_DIRS.add(pathname)
+        FakeFile.ALLOWED_DIRS.add(os.path.realpath(pathname))
+
+  @staticmethod
   def SetSkippedFiles(skip_files):
     """Sets which files in the application directory are to be ignored.
 
@@ -1024,6 +1065,10 @@
     super(FakeFile, self).__init__(filename, mode, bufsize, **kwargs)
 
 
+from google.appengine.dist import _library
+_library.SetAllowedModule = FakeFile.SetAllowedModule
+
+
 class RestrictedPathFunction(object):
   """Enforces access restrictions for functions that have a file or
   directory path as their first argument."""
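
SetAllowedModule and the _library hook above are the dev_appserver's half of
the use_library() handshake: the dist package resolves the requested library,
and the hook records its file or package directory so the FakeFile sandbox
will open it. A simplified sketch of just the path resolution (standalone,
without the FakeFile bookkeeping):

  import imp
  import os

  def resolve_module_path(name):
    stream, pathname, description = imp.find_module(name)
    if stream:
      stream.close()  # a plain module; the single file would be allowed
    return os.path.normcase(os.path.abspath(pathname))

  print resolve_module_path('email')  # a package directory on most installs
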
@@ -2053,7 +2098,7 @@
     ClearAllButEncodingsModules(sys.modules)
     sys.modules.update(module_dict)
     sys.argv = [cgi_path]
-    sys.stdin = infile
+    sys.stdin = cStringIO.StringIO(infile.getvalue())
     sys.stdout = outfile
     os.environ.clear()
     os.environ.update(env)
@@ -2153,7 +2198,7 @@
       if base_env_dict:
         env.update(base_env_dict)
       cgi_path = self._path_adjuster.AdjustPath(path)
-      env.update(self._setup_env(cgi_path, relative_url, headers))
+      env.update(self._setup_env(cgi_path, relative_url, headers, infile))
       self._exec_cgi(self._root_path,
                      path,
                      cgi_path,
@@ -2855,8 +2900,10 @@
         if require_indexes:
           dev_appserver_index.SetupIndexes(config.application, root_path)
 
-        infile = cStringIO.StringIO(self.rfile.read(
+        infile = cStringIO.StringIO()
+        infile.write(self.rfile.read(
             int(self.headers.get('content-length', 0))))
+        infile.seek(0)
 
         request_size = len(infile.getvalue())
         if request_size > MAX_REQUEST_SIZE:
@@ -3153,6 +3200,8 @@
     app_id: Application ID being served.
 
   Keywords:
+    root_path: Root path to the directory of the application which should
+        contain the app.yaml, index.yaml, and queue.yaml files.
     login_url: Relative URL which should be used for handling user login/logout.
     datastore_path: Path to the file to store Datastore file stub data in.
     history_path: Path to the file to store Datastore history in.
@@ -3168,6 +3217,7 @@
       behavior is different from the real app server and should be left False
       except for advanced uses of dev_appserver.
   """
+  root_path = config.get('root_path', None)
   login_url = config['login_url']
   datastore_path = config['datastore_path']
   history_path = config['history_path']
@@ -3231,6 +3281,10 @@
     'capability_service',
     capability_stub.CapabilityServiceStub())
 
+  apiproxy_stub_map.apiproxy.RegisterStub(
+    'taskqueue',
+    taskqueue_stub.TaskQueueServiceStub(root_path=root_path))
+
 
   try:
     from google.appengine.api.images import images_stub
@@ -3276,7 +3330,6 @@
                      False,
                      False)
 
-
   admin_dispatcher = create_cgi_dispatcher(module_dict, root_path,
                                            path_adjuster)
   url_matcher.AddURL('/_ah/admin(?:/.*)?',
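
With the 'taskqueue' stub registered above (constructed with root_path so it
can find the application's queue.yaml), handlers running under the dev server
can exercise the new task queue service. A hedged example, assuming the labs
API surface that ships with this SDK:

  from google.appengine.api.labs import taskqueue

  # Enqueue a POST to /worker on the default queue; params become the body.
  taskqueue.add(url='/worker', params={'key': 'abc123'})
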
--- a/thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py	Fri Jun 19 16:13:32 2009 +0200
@@ -417,6 +417,8 @@
   allow_skipped_files = option_dict[ARG_ALLOW_SKIPPED_FILES]
   static_caching = option_dict[ARG_STATIC_CACHING]
 
+  option_dict['root_path'] = os.path.realpath(root_path)
+
   logging.basicConfig(
     level=log_level,
     format='%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s] %(message)s')
--- a/thirdparty/google_appengine/google/net/proto/ProtocolBuffer.py	Mon Jun 08 22:34:05 2009 +0200
+++ b/thirdparty/google_appengine/google/net/proto/ProtocolBuffer.py	Fri Jun 19 16:13:32 2009 +0200
@@ -214,7 +214,7 @@
       return self.DebugFormatFixed32(value)
     return "%d" % value
   def DebugFormatInt64(self, value):
-    if (value <= -2000000000 or value >= 2000000000):
+    if (value <= -20000000000000 or value >= 20000000000000):
       return self.DebugFormatFixed64(value)
     return "%d" % value
   def DebugFormatString(self, value):
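
The DebugFormatInt64 change above widens the plain-decimal range: previously
any value at or beyond +/-2e9 fell back to the fixed64 debug form, which
misrendered ordinary 64-bit values; now only values at or beyond +/-2e13 do.
A standalone re-implementation to show the difference (this is not the
ProtocolBuffer class itself):

  def debug_format_int64(value, threshold):
    if value <= -threshold or value >= threshold:
      return '<fixed64 form>'  # stands in for DebugFormatFixed64(value)
    return '%d' % value

  print debug_format_int64(5000000000, 2000000000)      # old: '<fixed64 form>'
  print debug_format_int64(5000000000, 20000000000000)  # new: '5000000000'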