changeset   1278:a7766286a7be
parent      1277:5c931bd3dc1e
child       1279:f85ac8856dca
author      Pawel Solyga <Pawel.Solyga@gmail.com>
date        Thu, 12 Feb 2009 12:30:36 +0000

Load /Users/solydzajs/Downloads/google_appengine into trunk/thirdparty/google_appengine.

files:
thirdparty/google_appengine/RELEASE_NOTES
thirdparty/google_appengine/VERSION
thirdparty/google_appengine/appcfg.py
thirdparty/google_appengine/bulkload_client.py
thirdparty/google_appengine/bulkloader.py
thirdparty/google_appengine/dev_appserver.py
thirdparty/google_appengine/google/appengine/api/api_base_pb.py
thirdparty/google_appengine/google/appengine/api/apiproxy_rpc.py
thirdparty/google_appengine/google/appengine/api/appinfo.py
thirdparty/google_appengine/google/appengine/api/capabilities/capability_service_pb.py
thirdparty/google_appengine/google/appengine/api/croninfo.py
thirdparty/google_appengine/google/appengine/api/datastore.py
thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py
thirdparty/google_appengine/google/appengine/api/datastore_types.py
thirdparty/google_appengine/google/appengine/api/images/__init__.py
thirdparty/google_appengine/google/appengine/api/images/images_service_pb.py
thirdparty/google_appengine/google/appengine/api/images/images_stub.py
thirdparty/google_appengine/google/appengine/api/mail_service_pb.py
thirdparty/google_appengine/google/appengine/api/mail_stub.py
thirdparty/google_appengine/google/appengine/api/memcache/memcache_service_pb.py
thirdparty/google_appengine/google/appengine/api/memcache/memcache_stub.py
thirdparty/google_appengine/google/appengine/api/urlfetch_service_pb.py
thirdparty/google_appengine/google/appengine/api/user_service_pb.py
thirdparty/google_appengine/google/appengine/api/users.py
thirdparty/google_appengine/google/appengine/base/capabilities_pb.py
thirdparty/google_appengine/google/appengine/cron/groctimespecification.py
thirdparty/google_appengine/google/appengine/datastore/datastore_index.py
thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py
thirdparty/google_appengine/google/appengine/datastore/entity_pb.py
thirdparty/google_appengine/google/appengine/dist/__init__.py
thirdparty/google_appengine/google/appengine/dist/ftplib.py
thirdparty/google_appengine/google/appengine/dist/httplib.py
thirdparty/google_appengine/google/appengine/dist/neo_cgi.py
thirdparty/google_appengine/google/appengine/dist/py_imp.py
thirdparty/google_appengine/google/appengine/dist/select.py
thirdparty/google_appengine/google/appengine/dist/socket.py
thirdparty/google_appengine/google/appengine/dist/subprocess.py
thirdparty/google_appengine/google/appengine/dist/tempfile.py
thirdparty/google_appengine/google/appengine/ext/bulkload/__init__.py
thirdparty/google_appengine/google/appengine/ext/db/__init__.py
thirdparty/google_appengine/google/appengine/ext/gql/__init__.py
thirdparty/google_appengine/google/appengine/ext/remote_api/__init__.py
thirdparty/google_appengine/google/appengine/ext/remote_api/handler.py
thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_pb.py
thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_stub.py
thirdparty/google_appengine/google/appengine/ext/search/__init__.py
thirdparty/google_appengine/google/appengine/ext/webapp/util.py
thirdparty/google_appengine/google/appengine/runtime/apiproxy.py
thirdparty/google_appengine/google/appengine/runtime/apiproxy_errors.py
thirdparty/google_appengine/google/appengine/tools/appcfg.py
thirdparty/google_appengine/google/appengine/tools/bulkloader.py
thirdparty/google_appengine/google/appengine/tools/dev_appserver.py
thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py
thirdparty/google_appengine/lib/antlr3/antlr3/__init__.py
thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/PKG-INFO
thirdparty/google_appengine/lib/antlr3/setup.py
--- a/thirdparty/google_appengine/RELEASE_NOTES	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/RELEASE_NOTES	Thu Feb 12 12:30:36 2009 +0000
@@ -3,6 +3,45 @@
 
 App Engine SDK - Release Notes
 
+Version 1.1.9 - February 2, 2009
+================================
+
+  - HTTP Request and Response limit raised to 10MB from 1MB.
+    Note that API call limits remain at 1MB.
+      http://code.google.com/p/googleappengine/issues/detail?id=78
+  - urllib and urllib2 now available, implemented using urlfetch.
+    Also adds additional stubs which may enable other modules.
+      http://code.google.com/p/googleappengine/issues/detail?id=61
+      http://code.google.com/p/googleappengine/issues/detail?id=68
+      http://code.google.com/p/googleappengine/issues/detail?id=572
+      http://code.google.com/p/googleappengine/issues/detail?id=821
+  - Early release of a new data bulk upload tool, bulkloader.py
+      http://code.google.com/appengine/docs/python/tools/uploadingdata.html
+  - New remote_api for datastore at google.appengine.ext.remote_api
+  - Single property descending indexes are automatically generated.
+  - Added db.Query support for IN and != operators.
+      http://code.google.com/p/googleappengine/issues/detail?id=751
+  - Fixed issue where gql date/time parsing could not handle Unicode strings.
+  - Fixed issue with db model instance key() returning the wrong key for
+    unsaved instances with parent as key
+      http://code.google.com/p/googleappengine/issues/detail?id=883
+  - New run_in_transaction_custom_retries method for datastore.
+  - Fixed issue with relative dev_appserver datastore and history paths.
+      http://code.google.com/p/googleappengine/issues/detail?id=845
+  - Static files and skipped files are not readable in dev_appserver, to match
+    the behavior on App Engine.
+      http://code.google.com/p/googleappengine/issues/detail?id=550
+  - Images API allows multiple transforms of the same type in one request. A
+    limit of 10 total transforms per request has been added.
+  - PIL import will work with both PIL.Image and Image.
+      http://code.google.com/p/googleappengine/issues/detail?id=929
+  - Fixed an issue with sending email in dev_appserver when the application
+    code changed.
+      http://code.google.com/p/googleappengine/issues/detail?id=182
+  - Memcache counters (incr/decr) do nothing on non positive integers to match
+    the behavior on App Engine.
+      http://code.google.com/p/googleappengine/issues/detail?id=918
+
 Version 1.1.8 - January 7, 2008
 =================================
   - Skip_files RegexStr validator allows lists to for regex-ors.
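
As a quick illustration of the urllib/urllib2 item above: the standard-library HTTP clients now work inside the sandbox because they are routed through urlfetch. A minimal sketch (the URL is a placeholder); note that the urlfetch API limits mentioned above, including the 1MB API call limit, still apply:

    import urllib2

    # Served by the urlfetch API under the hood, so urlfetch quotas apply.
    response = urllib2.urlopen('http://example.com/')
    body = response.read()
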
--- a/thirdparty/google_appengine/VERSION	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/VERSION	Thu Feb 12 12:30:36 2009 +0000
@@ -1,3 +1,3 @@
-release: "1.1.8"
-timestamp: 1231809440
+release: "1.1.9"
+timestamp: 1232676672
 api_versions: ['1']
--- a/thirdparty/google_appengine/appcfg.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/appcfg.py	Thu Feb 12 12:30:36 2009 +0000
@@ -48,12 +48,12 @@
   "dev_appserver.py" : "dev_appserver_main.py"
 }
 
-def run_file(file_path, globals_):
+def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
   """Execute the file at the specified path with the passed-in globals."""
   sys.path = EXTRA_PATHS + sys.path
   script_name = os.path.basename(file_path)
   script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
-  script_path = os.path.join(SCRIPT_DIR, script_name)
+  script_path = os.path.join(script_dir, script_name)
   execfile(script_path, globals_)
 
 if __name__ == '__main__':
--- a/thirdparty/google_appengine/bulkload_client.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/bulkload_client.py	Thu Feb 12 12:30:36 2009 +0000
@@ -48,12 +48,12 @@
   "dev_appserver.py" : "dev_appserver_main.py"
 }
 
-def run_file(file_path, globals_):
+def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
   """Execute the file at the specified path with the passed-in globals."""
   sys.path = EXTRA_PATHS + sys.path
   script_name = os.path.basename(file_path)
   script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
-  script_path = os.path.join(SCRIPT_DIR, script_name)
+  script_path = os.path.join(script_dir, script_name)
   execfile(script_path, globals_)
 
 if __name__ == '__main__':
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/bulkloader.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Convenience wrapper for starting an appengine tool."""
+
+
+import os
+import sys
+
+if not hasattr(sys, 'version_info'):
+  sys.stderr.write('Very old versions of Python are not supported. Please '
+                   'use version 2.5 or greater.\n')
+  sys.exit(1)
+version_tuple = tuple(sys.version_info[:2])
+if version_tuple < (2, 4):
+  sys.stderr.write('Error: Python %d.%d is not supported. Please use '
+                   'version 2.5 or greater.\n' % version_tuple)
+  sys.exit(1)
+if version_tuple == (2, 4):
+  sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
+                   'break. Please use version 2.5 or greater.\n')
+
+DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
+
+EXTRA_PATHS = [
+  DIR_PATH,
+  os.path.join(DIR_PATH, 'lib', 'antlr3'),
+  os.path.join(DIR_PATH, 'lib', 'django'),
+  os.path.join(DIR_PATH, 'lib', 'webob'),
+  os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
+]
+
+SCRIPT_EXCEPTIONS = {
+  "dev_appserver.py" : "dev_appserver_main.py"
+}
+
+def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
+  """Execute the file at the specified path with the passed-in globals."""
+  sys.path = EXTRA_PATHS + sys.path
+  script_name = os.path.basename(file_path)
+  script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
+  script_path = os.path.join(script_dir, script_name)
+  execfile(script_path, globals_)
+
+if __name__ == '__main__':
+  run_file(__file__, globals())
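
The wrapper above is the same boilerplate used by appcfg.py, bulkload_client.py and dev_appserver.py: it locates the real script under google/appengine/tools and execfile()s it. A small sketch of just the resolution step, with hypothetical paths, showing why script_dir is now a parameter (callers and tests can point it somewhere other than the module-level SCRIPT_DIR):

    import os

    SCRIPT_EXCEPTIONS = {"dev_appserver.py": "dev_appserver_main.py"}

    def resolve(wrapper_path, script_dir):
      # Same lookup run_file() performs before calling execfile().
      name = os.path.basename(wrapper_path)
      name = SCRIPT_EXCEPTIONS.get(name, name)
      return os.path.join(script_dir, name)

    # e.g. /sdk/dev_appserver.py -> /sdk/google/appengine/tools/dev_appserver_main.py
    print(resolve('/sdk/dev_appserver.py', '/sdk/google/appengine/tools'))
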
--- a/thirdparty/google_appengine/dev_appserver.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/dev_appserver.py	Thu Feb 12 12:30:36 2009 +0000
@@ -48,12 +48,12 @@
   "dev_appserver.py" : "dev_appserver_main.py"
 }
 
-def run_file(file_path, globals_):
+def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
   """Execute the file at the specified path with the passed-in globals."""
   sys.path = EXTRA_PATHS + sys.path
   script_name = os.path.basename(file_path)
   script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
-  script_path = os.path.join(SCRIPT_DIR, script_name)
+  script_path = os.path.join(script_dir, script_name)
   execfile(script_path, globals_)
 
 if __name__ == '__main__':
--- a/thirdparty/google_appengine/google/appengine/api/api_base_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/api_base_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -36,8 +36,9 @@
     self.value_ = x
 
   def clear_value(self):
-    self.has_value_ = 0
-    self.value_ = ""
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = ""
 
   def has_value(self): return self.has_value_
 
@@ -116,8 +117,9 @@
     self.value_ = x
 
   def clear_value(self):
-    self.has_value_ = 0
-    self.value_ = 0
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = 0
 
   def has_value(self): return self.has_value_
 
@@ -196,8 +198,9 @@
     self.value_ = x
 
   def clear_value(self):
-    self.has_value_ = 0
-    self.value_ = 0
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = 0
 
   def has_value(self): return self.has_value_
 
@@ -276,8 +279,9 @@
     self.value_ = x
 
   def clear_value(self):
-    self.has_value_ = 0
-    self.value_ = 0
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = 0
 
   def has_value(self): return self.has_value_
 
@@ -355,8 +359,9 @@
     self.value_ = x
 
   def clear_value(self):
-    self.has_value_ = 0
-    self.value_ = 0.0
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = 0.0
 
   def has_value(self): return self.has_value_
 
--- a/thirdparty/google_appengine/google/appengine/api/apiproxy_rpc.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/apiproxy_rpc.py	Thu Feb 12 12:30:36 2009 +0000
@@ -37,7 +37,7 @@
   FINISHING = 2
 
   def __init__(self, package=None, call=None, request=None, response=None,
-               callback=None, stub=None):
+               callback=None, deadline=None, stub=None):
     """Constructor for the RPC object.
 
     All arguments are optional, and simply set members on the class.
@@ -49,6 +49,8 @@
       request: ProtocolMessage instance, appropriate for the arguments
       response: ProtocolMessage instance, appropriate for the response
       callback: callable, called when call is complete
+      deadline: A double specifying the deadline for this call as the number of
+                seconds from the current time. Ignored if non-positive.
       stub: APIProxyStub instance, used in default _WaitImpl to do real call
     """
     self.__exception = None
@@ -60,10 +62,11 @@
     self.request = request
     self.response = response
     self.callback = callback
+    self.deadline = deadline
     self.stub = stub
 
   def MakeCall(self, package=None, call=None, request=None, response=None,
-               callback=None):
+               callback=None, deadline=None):
     """Makes an asynchronous (i.e. non-blocking) API call within the
     specified package for the specified call method.
 
@@ -81,6 +84,7 @@
     self.call = call or self.call
     self.request = request or self.request
     self.response = response or self.response
+    self.deadline = deadline or self.deadline
 
     assert self.__state is RPC.IDLE, ('RPC for %s.%s has already been started' %
                                       (self.package, self.call))
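
The new deadline argument threads a per-call timeout through the RPC object. A minimal sketch of constructing such an RPC (actually issuing the call still requires a registered stub):

    from google.appengine.api import apiproxy_rpc
    from google.appengine.api import urlfetch_service_pb

    request = urlfetch_service_pb.URLFetchRequest()
    response = urlfetch_service_pb.URLFetchResponse()

    # deadline is in seconds from now; non-positive values are ignored.
    rpc = apiproxy_rpc.RPC(package='urlfetch', call='Fetch',
                           request=request, response=response,
                           deadline=5.0)
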
--- a/thirdparty/google_appengine/google/appengine/api/appinfo.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/appinfo.py	Thu Feb 12 12:30:36 2009 +0000
@@ -56,6 +56,8 @@
 
 RUNTIME_RE_STRING = r'[a-z]{1,30}'
 
+API_VERSION_RE_STRING = r'[\w.]{1,32}'
+
 HANDLER_STATIC_FILES = 'static_files'
 HANDLER_STATIC_DIR = 'static_dir'
 HANDLER_SCRIPT = 'script'
@@ -307,7 +309,7 @@
     RUNTIME: RUNTIME_RE_STRING,
 
 
-    API_VERSION: validation.Options('1', 'beta'),
+    API_VERSION: API_VERSION_RE_STRING,
     HANDLERS: validation.Optional(validation.Repeated(URLMap)),
     DEFAULT_EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
     SKIP_FILES: validation.RegexStr(default=DEFAULT_SKIP_FILES)
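
Relaxing API_VERSION from the fixed Options('1', 'beta') list to a regex means any short word/dotted token now validates. Roughly, using plain re to mimic the validator's full-string match:

    import re

    API_VERSION_RE_STRING = r'[\w.]{1,32}'

    for candidate in ('1', 'beta', '2.0.1', ''):
      ok = re.match('^(?:%s)$' % API_VERSION_RE_STRING, candidate) is not None
      print('%r -> %s' % (candidate, ok))   # only the empty string fails
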
--- a/thirdparty/google_appengine/google/appengine/api/capabilities/capability_service_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/capabilities/capability_service_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -39,8 +39,9 @@
     self.package_ = x
 
   def clear_package(self):
-    self.has_package_ = 0
-    self.package_ = ""
+    if self.has_package_:
+      self.has_package_ = 0
+      self.package_ = ""
 
   def has_package(self): return self.has_package_
 
@@ -216,8 +217,9 @@
     self.summary_status_ = x
 
   def clear_summary_status(self):
-    self.has_summary_status_ = 0
-    self.summary_status_ = 0
+    if self.has_summary_status_:
+      self.has_summary_status_ = 0
+      self.summary_status_ = 0
 
   def has_summary_status(self): return self.has_summary_status_
 
@@ -228,8 +230,9 @@
     self.time_until_scheduled_ = x
 
   def clear_time_until_scheduled(self):
-    self.has_time_until_scheduled_ = 0
-    self.time_until_scheduled_ = 0
+    if self.has_time_until_scheduled_:
+      self.has_time_until_scheduled_ = 0
+      self.time_until_scheduled_ = 0
 
   def has_time_until_scheduled(self): return self.has_time_until_scheduled_
 
--- a/thirdparty/google_appengine/google/appengine/api/croninfo.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/croninfo.py	Thu Feb 12 12:30:36 2009 +0000
@@ -23,6 +23,15 @@
 
 
 
+import logging
+import sys
+import traceback
+
+try:
+  import pytz
+except ImportError:
+  pytz = None
+
 from google.appengine.cron import groc
 from google.appengine.api import validation
 from google.appengine.api import yaml_builder
@@ -30,10 +39,7 @@
 from google.appengine.api import yaml_object
 
 _URL_REGEX = r'^/.*$'
-
-
 _TIMEZONE_REGEX = r'^.{0,100}$'
-
 _DESCRIPTION_REGEX = r'^.{0,499}$'
 
 
@@ -55,6 +61,31 @@
     return value
 
 
+class TimezoneValidator(validation.Validator):
+  """Checks that a timezone can be correctly parsed and is known."""
+
+  def Validate(self, value):
+    """Validates a timezone."""
+    if value is None:
+      return
+    if not isinstance(value, basestring):
+      raise TypeError('timezone must be a string, not \'%r\'' % type(value))
+    if pytz is None:
+      return value
+    try:
+      pytz.timezone(value)
+    except pytz.UnknownTimeZoneError:
+      raise validation.ValidationError('timezone \'%s\' is unknown' % value)
+    except IOError:
+      return value
+    except:
+      e, v, t = sys.exc_info()
+      logging.warning("pytz raised an unexpected error: %s.\n" % (v) +
+                      "Traceback:\n" + "\n".join(traceback.format_tb(t)))
+      raise
+    return value
+
+
 CRON = 'cron'
 
 URL = 'url'
@@ -73,7 +104,7 @@
   ATTRIBUTES = {
       URL: _URL_REGEX,
       SCHEDULE: GrocValidator(),
-      TIMEZONE: validation.Optional(_TIMEZONE_REGEX),
+      TIMEZONE: TimezoneValidator(),
       DESCRIPTION: validation.Optional(_DESCRIPTION_REGEX)
   }
 
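
The new TimezoneValidator rejects a timezone name only when pytz is importable and does not recognize it; without pytz, any string passes through unchecked. A standalone sketch of the same check:

    try:
      import pytz
    except ImportError:
      pytz = None

    def check_timezone(value):
      if value is None or pytz is None:
        return value                # nothing to verify without pytz
      pytz.timezone(value)          # raises pytz.UnknownTimeZoneError if unknown
      return value

    check_timezone('America/Los_Angeles')
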
--- a/thirdparty/google_appengine/google/appengine/api/datastore.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/datastore.py	Thu Feb 12 12:30:36 2009 +0000
@@ -31,6 +31,8 @@
 
 
 
+import heapq
+import itertools
 import logging
 import re
 import string
@@ -47,7 +49,9 @@
 from google.appengine.runtime import apiproxy_errors
 from google.appengine.datastore import entity_pb
 
-TRANSACTION_RETRIES = 3
+MAX_ALLOWABLE_QUERIES = 30
+
+DEFAULT_TRANSACTION_RETRIES = 3
 
 _MAX_INDEXED_PROPERTIES = 5000
 
@@ -488,7 +492,7 @@
       if isinstance(sample, list):
         sample = values[0]
 
-      if isinstance(sample, (datastore_types.Blob, datastore_types.Text)):
+      if isinstance(sample, datastore_types._RAW_PROPERTY_TYPES):
         pb.raw_property_list().extend(properties)
       else:
         pb.property_list().extend(properties)
@@ -1072,12 +1076,9 @@
       values = list(values)
     elif not isinstance(values, list):
       values = [values]
-    if isinstance(values[0], datastore_types.Blob):
+    if isinstance(values[0], datastore_types._RAW_PROPERTY_TYPES):
       raise datastore_errors.BadValueError(
-        'Filtering on Blob properties is not supported.')
-    if isinstance(values[0], datastore_types.Text):
-      raise datastore_errors.BadValueError(
-        'Filtering on Text properties is not supported.')
+        'Filtering on %s properties is not supported.' % typename(values[0]))
 
     if operator in self.INEQUALITY_OPERATORS:
       if self.__inequality_prop and property != self.__inequality_prop:
@@ -1165,6 +1166,306 @@
     return pb
 
 
+class MultiQuery(Query):
+  """Class representing a query which requires multiple datastore queries.
+
+  This class is actually a subclass of datastore.Query as it is intended to act
+  like a normal Query object (supporting the same interface).
+  """
+
+  def __init__(self, bound_queries, orderings):
+    if len(bound_queries) > MAX_ALLOWABLE_QUERIES:
+      raise datastore_errors.BadArgumentError(
+          'Cannot satisfy query -- too many subqueries (max: %d, got %d).'
+          ' Probable cause: too many IN/!= filters in query.' %
+          (MAX_ALLOWABLE_QUERIES, len(bound_queries)))
+    self.__bound_queries = bound_queries
+    self.__orderings = orderings
+
+  def __str__(self):
+    res = 'MultiQuery: '
+    for query in self.__bound_queries:
+      res = '%s %s' % (res, str(query))
+    return res
+
+  def Get(self, limit, offset=0):
+    """Get results of the query with a limit on the number of results.
+
+    Args:
+      limit: maximum number of values to return.
+      offset: offset requested -- if nonzero, this will override the offset in
+              the original query
+
+    Returns:
+      A list of entities with at most "limit" entries (less if the query
+      completes before reading limit values).
+    """
+    count = 1
+    result = []
+
+    iterator = self.Run()
+
+    try:
+      for i in xrange(offset):
+        val = iterator.next()
+    except StopIteration:
+      pass
+
+    try:
+      while count <= limit:
+        val = iterator.next()
+        result.append(val)
+        count += 1
+    except StopIteration:
+      pass
+    return result
+
+  class SortOrderEntity(object):
+    """Allow entity comparisons using provided orderings.
+
+    The iterator passed to the constructor is eventually consumed via
+    calls to GetNext(), which generate new SortOrderEntity s with the
+    same orderings.
+    """
+
+    def __init__(self, entity_iterator, orderings):
+      """Ctor.
+
+      Args:
+        entity_iterator: an iterator of entities which will be wrapped.
+        orderings: an iterable of (identifier, order) pairs. order
+          should be either Query.ASCENDING or Query.DESCENDING.
+      """
+      self.__entity_iterator = entity_iterator
+      self.__entity = None
+      self.__min_max_value_cache = {}
+      try:
+        self.__entity = entity_iterator.next()
+      except StopIteration:
+        pass
+      else:
+        self.__orderings = orderings
+
+    def __str__(self):
+      return str(self.__entity)
+
+    def GetEntity(self):
+      """Gets the wrapped entity."""
+      return self.__entity
+
+    def GetNext(self):
+      """Wrap and return the next entity.
+
+      The entity is retrieved from the iterator given at construction time.
+      """
+      return MultiQuery.SortOrderEntity(self.__entity_iterator,
+                                        self.__orderings)
+
+    def CmpProperties(self, that):
+      """Compare two entities and return their relative order.
+
+      Compares self to that based on the current sort orderings and the
+      key orders between them. Returns negative, 0, or positive depending on
+      whether self is less, equal to, or greater than that. This
+      comparison returns as if all values were to be placed in ascending order
+      (highest value last).  Only uses the sort orderings to compare (ignores
+       keys).
+
+      Args:
+        that: SortOrderEntity
+
+      Returns:
+        Negative if self < that
+        Zero if self == that
+        Positive if self > that
+      """
+      if not self.__entity:
+        return cmp(self.__entity, that.__entity)
+
+      for (identifier, order) in self.__orderings:
+        value1 = self.__GetValueForId(self, identifier, order)
+        value2 = self.__GetValueForId(that, identifier, order)
+
+        result = cmp(value1, value2)
+        if order == Query.DESCENDING:
+          result = -result
+        if result:
+          return result
+      return 0
+
+    def __GetValueForId(self, sort_order_entity, identifier, sort_order):
+      value = sort_order_entity.__entity[identifier]
+      entity_key = sort_order_entity.__entity.key()
+      if (entity_key, identifier) in self.__min_max_value_cache:
+        value = self.__min_max_value_cache[(entity_key, identifier)]
+      elif isinstance(value, list):
+        if sort_order == Query.DESCENDING:
+          value = min(value)
+        else:
+          value = max(value)
+        self.__min_max_value_cache[(entity_key, identifier)] = value
+
+      return value
+
+    def __cmp__(self, that):
+      """Compare self to that w.r.t. values defined in the sort order.
+
+      Compare an entity with another, using sort-order first, then the key
+      order to break ties. This can be used in a heap to have faster min-value
+      lookup.
+
+      Args:
+        that: other entity to compare to
+      Returns:
+        negative: if self is less than that in sort order
+        zero: if self is equal to that in sort order
+        positive: if self is greater than that in sort order
+      """
+      property_compare = self.CmpProperties(that)
+      if property_compare:
+        return property_compare
+      else:
+        return cmp(self.__entity.key(), that.__entity.key())
+
+  def Run(self):
+    """Return an iterable output with all results in order."""
+    results = []
+    count = 1
+    log_level = logging.DEBUG - 1
+    for bound_query in self.__bound_queries:
+      logging.log(log_level, 'Running query #%i' % count)
+      results.append(bound_query.Run())
+      count += 1
+
+    def IterateResults(results):
+      """Iterator function to return all results in sorted order.
+
+      Iterate over the array of results, yielding the next element, in
+      sorted order. This function is destructive (results will be empty
+      when the operation is complete).
+
+      Args:
+        results: list of result iterators to merge and iterate through
+
+      Yields:
+        The next result in sorted order.
+      """
+      result_heap = []
+      for result in results:
+        heap_value = MultiQuery.SortOrderEntity(result, self.__orderings)
+        if heap_value.GetEntity():
+          heapq.heappush(result_heap, heap_value)
+
+      used_keys = set()
+
+      while result_heap:
+        top_result = heapq.heappop(result_heap)
+
+        results_to_push = []
+        if top_result.GetEntity().key() not in used_keys:
+          yield top_result.GetEntity()
+        else:
+          pass
+
+        used_keys.add(top_result.GetEntity().key())
+
+        results_to_push = []
+        while result_heap:
+          next = heapq.heappop(result_heap)
+          if cmp(top_result, next):
+            results_to_push.append(next)
+            break
+          else:
+            results_to_push.append(next.GetNext())
+        results_to_push.append(top_result.GetNext())
+
+        for popped_result in results_to_push:
+          if popped_result.GetEntity():
+            heapq.heappush(result_heap, popped_result)
+
+    return IterateResults(results)
+
+  def Count(self, limit=None):
+    """Return the number of matched entities for this query.
+
+    Will return the de-duplicated count of results.  Will call the more
+    efficient Get() function if a limit is given.
+
+    Args:
+      limit: maximum number of entries to count (for any result > limit, return
+      limit).
+    Returns:
+      count of the number of entries returned.
+    """
+    if limit is None:
+      count = 0
+      for i in self.Run():
+        count += 1
+      return count
+    else:
+      return len(self.Get(limit))
+
+  def __setitem__(self, query_filter, value):
+    """Add a new filter by setting it on all subqueries.
+
+    If any of the setting operations raise an exception, the ones
+    that succeeded are undone and the exception is propagated
+    upward.
+
+    Args:
+      query_filter: a string of the form "property operand".
+      value: the value that the given property is compared against.
+    """
+    saved_items = []
+    for index, query in enumerate(self.__bound_queries):
+      saved_items.append(query.get(query_filter, None))
+      try:
+        query[query_filter] = value
+      except:
+        for q, old_value in itertools.izip(self.__bound_queries[:index],
+                                           saved_items):
+          if old_value is not None:
+            q[query_filter] = old_value
+          else:
+            del q[query_filter]
+        raise
+
+  def __delitem__(self, query_filter):
+    """Delete a filter by deleting it from all subqueries.
+
+    If a KeyError is raised during the attempt, it is ignored, unless
+    every subquery raised a KeyError. If any other exception is
+    raised, any deletes will be rolled back.
+
+    Args:
+      query_filter: the filter to delete.
+
+    Raises:
+      KeyError: No subquery had an entry containing query_filter.
+    """
+    subquery_count = len(self.__bound_queries)
+    keyerror_count = 0
+    saved_items = []
+    for index, query in enumerate(self.__bound_queries):
+      try:
+        saved_items.append(query.get(query_filter, None))
+        del query[query_filter]
+      except KeyError:
+        keyerror_count += 1
+      except:
+        for q, old_value in itertools.izip(self.__bound_queries[:index],
+                                           saved_items):
+          if old_value is not None:
+            q[query_filter] = old_value
+        raise
+
+    if keyerror_count == subquery_count:
+      raise KeyError(query_filter)
+
+  def __iter__(self):
+    return iter(self.__bound_queries)
+
+
 class Iterator(object):
   """An iterator over the results of a datastore query.
 
@@ -1331,8 +1632,29 @@
     self.modified_keys.update(keys)
 
 
+def RunInTransaction(function, *args, **kwargs):
+  """Runs a function inside a datastore transaction.
 
-def RunInTransaction(function, *args, **kwargs):
+     Runs the user-provided function inside transaction, retries default
+     number of times.
+
+    Args:
+    # a function to be run inside the transaction
+    function: callable
+    # positional arguments to pass to the function
+    args: variable number of any type
+
+  Returns:
+    the function's return value, if any
+
+  Raises:
+    TransactionFailedError, if the transaction could not be committed.
+  """
+  return RunInTransactionCustomRetries(
+      DEFAULT_TRANSACTION_RETRIES, function, *args, **kwargs)
+
+
+def RunInTransactionCustomRetries(retries, function, *args, **kwargs):
   """Runs a function inside a datastore transaction.
 
   Runs the user-provided function inside a full-featured, ACID datastore
@@ -1387,6 +1709,8 @@
   Nested transactions are not supported.
 
   Args:
+    # number of retries
+    retries: integer
     # a function to be run inside the transaction
     function: callable
     # positional arguments to pass to the function
@@ -1403,6 +1727,10 @@
     raise datastore_errors.BadRequestError(
       'Nested transactions are not supported.')
 
+  if retries < 0:
+    raise datastore_errors.BadRequestError(
+      'Number of retries should be non-negative number.')
+
   tx_key = None
 
   try:
@@ -1410,7 +1738,7 @@
     tx = _Transaction()
     _txes[tx_key] = tx
 
-    for i in range(0, TRANSACTION_RETRIES + 1):
+    for i in range(0, retries + 1):
       tx.modified_keys.clear()
 
       try:
@@ -1436,7 +1764,7 @@
 
       if tx.handle:
         try:
-          resp = api_base_pb.VoidProto()
+          resp = datastore_pb.CommitResponse()
           apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Commit',
                                          tx.handle, resp)
         except apiproxy_errors.ApplicationError, err:
@@ -1544,7 +1872,7 @@
   """Walks the stack to find a RunInTransaction() call.
 
   Returns:
-    # this is the RunInTransaction() frame record, if found
+    # this is the RunInTransactionCustomRetries() frame record, if found
     frame record or None
   """
   frame = sys._getframe()
@@ -1553,7 +1881,7 @@
   frame = frame.f_back.f_back
   while frame:
     if (frame.f_code.co_filename == filename and
-        frame.f_code.co_name == 'RunInTransaction'):
+        frame.f_code.co_name == 'RunInTransactionCustomRetries'):
       return frame
     frame = frame.f_back
 
--- a/thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py	Thu Feb 12 12:30:36 2009 +0000
@@ -128,6 +128,18 @@
     users.User: entity_pb.PropertyValue.kUserValueGroup,
     }
 
+  WRITE_ONLY = entity_pb.CompositeIndex.WRITE_ONLY
+  READ_WRITE = entity_pb.CompositeIndex.READ_WRITE
+  DELETED = entity_pb.CompositeIndex.DELETED
+  ERROR = entity_pb.CompositeIndex.ERROR
+
+  _INDEX_STATE_TRANSITIONS = {
+    WRITE_ONLY: frozenset((READ_WRITE, DELETED, ERROR)),
+    READ_WRITE: frozenset((DELETED,)),
+    ERROR: frozenset((DELETED,)),
+    DELETED: frozenset((ERROR,)),
+  }
+
   def __init__(self,
                app_id,
                datastore_file,
@@ -794,6 +806,7 @@
         for name, value_pb in props.items():
           prop_pb = kind_pb.add_property()
           prop_pb.set_name(name)
+          prop_pb.set_multiple(False)
           prop_pb.mutable_value().CopyFrom(value_pb)
 
         kinds.append(kind_pb)
@@ -838,7 +851,8 @@
     if not stored_index:
       raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
                                              "Index doesn't exist.")
-    elif index.state() != stored_index.state() + 1:
+    elif (index.state() != stored_index.state() and
+          index.state() not in self._INDEX_STATE_TRANSITIONS[stored_index.state()]):
       raise apiproxy_errors.ApplicationError(
         datastore_pb.Error.BAD_REQUEST,
         "cannot move index state from %s to %s" %
--- a/thirdparty/google_appengine/google/appengine/api/datastore_types.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/datastore_types.py	Thu Feb 12 12:30:36 2009 +0000
@@ -942,15 +942,7 @@
   users.User,
 ])
 
-_RAW_PROPERTY_TYPES = frozenset([
-  Blob,
-  Text,
-])
-
-_STRING_TYPES = frozenset([
-  str,
-  unicode,
-])
+_RAW_PROPERTY_TYPES = (Blob, Text)
 
 def ValidatePropertyInteger(name, value):
   """Raises an exception if the supplied integer is invalid.
@@ -1143,7 +1135,7 @@
 
 
 def DatetimeToTimestamp(value):
-  """Converts a datetime.datetime to seconds since the epoch, as a float.
+  """Converts a datetime.datetime to microseconds since the epoch, as a float.
   Args:
     value: datetime.datetime
 
--- a/thirdparty/google_appengine/google/appengine/api/images/__init__.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/images/__init__.py	Thu Feb 12 12:30:36 2009 +0000
@@ -42,6 +42,8 @@
 
 OUTPUT_ENCODING_TYPES = frozenset([JPEG, PNG])
 
+MAX_TRANSFORMS_PER_REQUEST = 10
+
 
 class Error(Exception):
   """Base error class for this module."""
@@ -84,29 +86,19 @@
 
     self._image_data = image_data
     self._transforms = []
-    self._transform_map = {}
     self._width = None
     self._height = None
 
-  def _check_transform_limits(self, transform):
+  def _check_transform_limits(self):
     """Ensure some simple limits on the number of transforms allowed.
 
-    Args:
-      transform: images_service_pb.ImagesServiceTransform, enum of the
-        trasnform called.
-
     Raises:
-      BadRequestError if the transform has already been requested for the image.
+      BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
+      requested for this image
     """
-    if not images_service_pb.ImagesServiceTransform.Type_Name(transform):
-      raise BadRequestError("'%s' is not a valid transform." % transform)
-
-    if transform in self._transform_map:
-      transform_name = images_service_pb.ImagesServiceTransform.Type_Name(
-          transform)
-      raise BadRequestError("A '%s' transform has already been "
-                            "requested on this image." % transform_name)
-    self._transform_map[transform] = True
+    if len(self._transforms) >= MAX_TRANSFORMS_PER_REQUEST:
+      raise BadRequestError("%d transforms have already been requested on this "
+                            "image." % MAX_TRANSFORMS_PER_REQUEST)
 
   def _update_dimensions(self):
     """Updates the width and height fields of the image.
@@ -172,14 +164,14 @@
       if (offset < size and ord(self._image_data[offset]) & 0xF0 == 0xC0 and
           ord(self._image_data[offset]) != 0xC4):
         offset += 4
-        if offset + 4 < size:
+        if offset + 4 <= size:
           self._height, self._width = struct.unpack(
               ">HH",
               self._image_data[offset:offset + 4])
           break
         else:
           raise BadImageError("Corrupt JPEG format")
-      elif offset + 2 <= size:
+      elif offset + 3 <= size:
         offset += 1
         offset += struct.unpack(">H", self._image_data[offset:offset + 2])[0]
       else:
@@ -199,7 +191,7 @@
     else:
       endianness = ">"
     ifd_offset = struct.unpack(endianness + "I", self._image_data[4:8])[0]
-    if ifd_offset < size + 14:
+    if ifd_offset + 14 <= size:
       ifd_size = struct.unpack(
           endianness + "H",
           self._image_data[ifd_offset:ifd_offset + 2])[0]
@@ -291,7 +283,8 @@
     Raises:
       TypeError when width or height is not either 'int' or 'long' types.
       BadRequestError when there is something wrong with the given height or
-        width or if a Resize has already been requested on this image.
+        width or if MAX_TRANSFORMS_PER_REQUEST transforms have already been
+        requested on this image.
     """
     if (not isinstance(width, (int, long)) or
         not isinstance(height, (int, long))):
@@ -305,8 +298,7 @@
     if width > 4000 or height > 4000:
       raise BadRequestError("Both width and height must be < 4000.")
 
-    self._check_transform_limits(
-        images_service_pb.ImagesServiceTransform.RESIZE)
+    self._check_transform_limits()
 
     transform = images_service_pb.Transform()
     transform.set_width(width)
@@ -323,7 +315,7 @@
     Raises:
       TypeError when degrees is not either 'int' or 'long' types.
       BadRequestError when there is something wrong with the given degrees or
-      if a Rotate trasnform has already been requested.
+      if MAX_TRANSFORMS_PER_REQUEST transforms have already been requested.
     """
     if not isinstance(degrees, (int, long)):
       raise TypeError("Degrees must be integers.")
@@ -333,8 +325,7 @@
 
     degrees = degrees % 360
 
-    self._check_transform_limits(
-        images_service_pb.ImagesServiceTransform.ROTATE)
+    self._check_transform_limits()
 
     transform = images_service_pb.Transform()
     transform.set_rotate(degrees)
@@ -345,11 +336,10 @@
     """Flip the image horizontally.
 
     Raises:
-      BadRequestError if a HorizontalFlip has already been requested on the
-      image.
+      BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
+      requested on the image.
     """
-    self._check_transform_limits(
-        images_service_pb.ImagesServiceTransform.HORIZONTAL_FLIP)
+    self._check_transform_limits()
 
     transform = images_service_pb.Transform()
     transform.set_horizontal_flip(True)
@@ -360,11 +350,10 @@
     """Flip the image vertically.
 
     Raises:
-      BadRequestError if a HorizontalFlip has already been requested on the
-      image.
+      BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
+      requested on the image.
     """
-    self._check_transform_limits(
-        images_service_pb.ImagesServiceTransform.VERTICAL_FLIP)
+    self._check_transform_limits()
     transform = images_service_pb.Transform()
     transform.set_vertical_flip(True)
 
@@ -405,7 +394,8 @@
     Raises:
       TypeError if the args are not of type 'float'.
       BadRequestError when there is something wrong with the given bounding box
-        or if there has already been a crop transform requested for this image.
+        or if MAX_TRANSFORMS_PER_REQUEST transforms have already been requested
+        for this image.
     """
     self._validate_crop_arg(left_x, "left_x")
     self._validate_crop_arg(top_y, "top_y")
@@ -417,7 +407,7 @@
     if top_y >= bottom_y:
       raise BadRequestError("top_y must be less than bottom_y")
 
-    self._check_transform_limits(images_service_pb.ImagesServiceTransform.CROP)
+    self._check_transform_limits()
 
     transform = images_service_pb.Transform()
     transform.set_crop_left_x(left_x)
@@ -433,11 +423,10 @@
     This is similar to the "I'm Feeling Lucky" button in Picasa.
 
     Raises:
-      BadRequestError if this transform has already been requested for this
-      image.
+      BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already
+      been requested for this image.
     """
-    self._check_transform_limits(
-        images_service_pb.ImagesServiceTransform.IM_FEELING_LUCKY)
+    self._check_transform_limits()
     transform = images_service_pb.Transform()
     transform.set_autolevels(True)
 
@@ -504,7 +493,6 @@
 
     self._image_data = response.image().content()
     self._transforms = []
-    self._transform_map.clear()
     self._width = None
     self._height = None
     return self._image_data
@@ -540,7 +528,7 @@
   Raises:
     TypeError when width or height not either 'int' or 'long' types.
     BadRequestError when there is something wrong with the given height or
-      width or if a Resize has already been requested on this image.
+      width.
     Error when something went wrong with the call.  See Image.ExecuteTransforms
       for more details.
   """
@@ -559,8 +547,7 @@
 
   Raises:
     TypeError when degrees is not either 'int' or 'long' types.
-    BadRequestError when there is something wrong with the given degrees or
-    if a Rotate trasnform has already been requested.
+    BadRequestError when there is something wrong with the given degrees.
     Error when something went wrong with the call.  See Image.ExecuteTransforms
       for more details.
   """
@@ -619,8 +606,7 @@
 
   Raises:
     TypeError if the args are not of type 'float'.
-    BadRequestError when there is something wrong with the given bounding box
-      or if there has already been a crop transform requested for this image.
+    BadRequestError when there is something wrong with the given bounding box.
     Error when something went wrong with the call.  See Image.ExecuteTransforms
       for more details.
   """
--- a/thirdparty/google_appengine/google/appengine/api/images/images_service_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/images/images_service_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -192,8 +192,9 @@
     self.width_ = x
 
   def clear_width(self):
-    self.has_width_ = 0
-    self.width_ = 0
+    if self.has_width_:
+      self.has_width_ = 0
+      self.width_ = 0
 
   def has_width(self): return self.has_width_
 
@@ -204,8 +205,9 @@
     self.height_ = x
 
   def clear_height(self):
-    self.has_height_ = 0
-    self.height_ = 0
+    if self.has_height_:
+      self.has_height_ = 0
+      self.height_ = 0
 
   def has_height(self): return self.has_height_
 
@@ -216,8 +218,9 @@
     self.rotate_ = x
 
   def clear_rotate(self):
-    self.has_rotate_ = 0
-    self.rotate_ = 0
+    if self.has_rotate_:
+      self.has_rotate_ = 0
+      self.rotate_ = 0
 
   def has_rotate(self): return self.has_rotate_
 
@@ -228,8 +231,9 @@
     self.horizontal_flip_ = x
 
   def clear_horizontal_flip(self):
-    self.has_horizontal_flip_ = 0
-    self.horizontal_flip_ = 0
+    if self.has_horizontal_flip_:
+      self.has_horizontal_flip_ = 0
+      self.horizontal_flip_ = 0
 
   def has_horizontal_flip(self): return self.has_horizontal_flip_
 
@@ -240,8 +244,9 @@
     self.vertical_flip_ = x
 
   def clear_vertical_flip(self):
-    self.has_vertical_flip_ = 0
-    self.vertical_flip_ = 0
+    if self.has_vertical_flip_:
+      self.has_vertical_flip_ = 0
+      self.vertical_flip_ = 0
 
   def has_vertical_flip(self): return self.has_vertical_flip_
 
@@ -252,8 +257,9 @@
     self.crop_left_x_ = x
 
   def clear_crop_left_x(self):
-    self.has_crop_left_x_ = 0
-    self.crop_left_x_ = 0.0
+    if self.has_crop_left_x_:
+      self.has_crop_left_x_ = 0
+      self.crop_left_x_ = 0.0
 
   def has_crop_left_x(self): return self.has_crop_left_x_
 
@@ -264,8 +270,9 @@
     self.crop_top_y_ = x
 
   def clear_crop_top_y(self):
-    self.has_crop_top_y_ = 0
-    self.crop_top_y_ = 0.0
+    if self.has_crop_top_y_:
+      self.has_crop_top_y_ = 0
+      self.crop_top_y_ = 0.0
 
   def has_crop_top_y(self): return self.has_crop_top_y_
 
@@ -276,8 +283,9 @@
     self.crop_right_x_ = x
 
   def clear_crop_right_x(self):
-    self.has_crop_right_x_ = 0
-    self.crop_right_x_ = 1.0
+    if self.has_crop_right_x_:
+      self.has_crop_right_x_ = 0
+      self.crop_right_x_ = 1.0
 
   def has_crop_right_x(self): return self.has_crop_right_x_
 
@@ -288,8 +296,9 @@
     self.crop_bottom_y_ = x
 
   def clear_crop_bottom_y(self):
-    self.has_crop_bottom_y_ = 0
-    self.crop_bottom_y_ = 1.0
+    if self.has_crop_bottom_y_:
+      self.has_crop_bottom_y_ = 0
+      self.crop_bottom_y_ = 1.0
 
   def has_crop_bottom_y(self): return self.has_crop_bottom_y_
 
@@ -300,8 +309,9 @@
     self.autolevels_ = x
 
   def clear_autolevels(self):
-    self.has_autolevels_ = 0
-    self.autolevels_ = 0
+    if self.has_autolevels_:
+      self.has_autolevels_ = 0
+      self.autolevels_ = 0
 
   def has_autolevels(self): return self.has_autolevels_
 
@@ -521,8 +531,9 @@
     self.content_ = x
 
   def clear_content(self):
-    self.has_content_ = 0
-    self.content_ = ""
+    if self.has_content_:
+      self.has_content_ = 0
+      self.content_ = ""
 
   def has_content(self): return self.has_content_
 
@@ -613,8 +624,9 @@
     self.mime_type_ = x
 
   def clear_mime_type(self):
-    self.has_mime_type_ = 0
-    self.mime_type_ = 0
+    if self.has_mime_type_:
+      self.has_mime_type_ = 0
+      self.mime_type_ = 0
 
   def has_mime_type(self): return self.has_mime_type_
 
--- a/thirdparty/google_appengine/google/appengine/api/images/images_stub.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/images/images_stub.py	Thu Feb 12 12:30:36 2009 +0000
@@ -22,9 +22,13 @@
 import logging
 import StringIO
 
-import PIL
-from PIL import _imaging
-from PIL import Image
+try:
+  import PIL
+  from PIL import _imaging
+  from PIL import Image
+except ImportError:
+  import _imaging
+  import Image
 
 from google.appengine.api import apiproxy_stub
 from google.appengine.api import images
@@ -246,24 +250,6 @@
 
     return image.crop(box)
 
-  def _CheckTransformCount(self, transform_map, req_transform):
-    """Check that the requested transform hasn't already been set in map.
-
-    Args:
-      transform_map: {images_service_pb.ImagesServiceTransform: boolean}, map
-        to use to determine if the requested transform has been called.
-      req_transform: images_service_pb.ImagesServiceTransform, the requested
-        transform.
-
-    Raises:
-      BadRequestError if we are passed more than one of the same type of
-      transform.
-    """
-    if req_transform in transform_map:
-      raise apiproxy_errors.ApplicationError(
-          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
-    transform_map[req_transform] = True
-
   def _ProcessTransforms(self, image, transforms):
     """Execute PIL operations based on transform values.
 
@@ -279,56 +265,29 @@
       transform.
     """
     new_image = image
-    transform_map = {}
+    if len(transforms) > images.MAX_TRANSFORMS_PER_REQUEST:
+      raise apiproxy_errors.ApplicationError(
+          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
     for transform in transforms:
       if transform.has_width() or transform.has_height():
-        self._CheckTransformCount(
-            transform_map,
-            images_service_pb.ImagesServiceTransform.RESIZE
-        )
-
         new_image = self._Resize(new_image, transform)
 
       elif transform.has_rotate():
-        self._CheckTransformCount(
-            transform_map,
-            images_service_pb.ImagesServiceTransform.ROTATE
-        )
-
         new_image = self._Rotate(new_image, transform)
 
       elif transform.has_horizontal_flip():
-        self._CheckTransformCount(
-            transform_map,
-            images_service_pb.ImagesServiceTransform.HORIZONTAL_FLIP
-        )
-
         new_image = new_image.transpose(Image.FLIP_LEFT_RIGHT)
 
       elif transform.has_vertical_flip():
-        self._CheckTransformCount(
-            transform_map,
-            images_service_pb.ImagesServiceTransform.VERTICAL_FLIP
-        )
-
         new_image = new_image.transpose(Image.FLIP_TOP_BOTTOM)
 
       elif (transform.has_crop_left_x() or
           transform.has_crop_top_y() or
           transform.has_crop_right_x() or
           transform.has_crop_bottom_y()):
-        self._CheckTransformCount(
-            transform_map,
-            images_service_pb.ImagesServiceTransform.CROP
-        )
-
         new_image = self._Crop(new_image, transform)
 
       elif transform.has_autolevels():
-        self._CheckTransformCount(
-            transform_map,
-            images_service_pb.ImagesServiceTransform.IM_FEELING_LUCKY
-        )
         logging.info("I'm Feeling Lucky autolevels will be visible once this "
                      "application is deployed.")
       else:
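
With the per-type restriction removed from both the client library and the stub, the same transform can now be queued more than once, up to MAX_TRANSFORMS_PER_REQUEST (10) in total. A minimal sketch, assuming image_data holds raw JPEG or PNG bytes:

    from google.appengine.api import images

    img = images.Image(image_data)
    img.rotate(90)
    img.rotate(90)                      # a second rotate is now allowed
    img.resize(width=200, height=200)
    thumbnail = img.execute_transforms(output_encoding=images.PNG)
    # Queueing an eleventh transform raises BadRequestError before execution.
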
--- a/thirdparty/google_appengine/google/appengine/api/mail_service_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/mail_service_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -107,8 +107,9 @@
     self.filename_ = x
 
   def clear_filename(self):
-    self.has_filename_ = 0
-    self.filename_ = ""
+    if self.has_filename_:
+      self.has_filename_ = 0
+      self.filename_ = ""
 
   def has_filename(self): return self.has_filename_
 
@@ -119,8 +120,9 @@
     self.data_ = x
 
   def clear_data(self):
-    self.has_data_ = 0
-    self.data_ = ""
+    if self.has_data_:
+      self.has_data_ = 0
+      self.data_ = ""
 
   def has_data(self): return self.has_data_
 
@@ -230,8 +232,9 @@
     self.sender_ = x
 
   def clear_sender(self):
-    self.has_sender_ = 0
-    self.sender_ = ""
+    if self.has_sender_:
+      self.has_sender_ = 0
+      self.sender_ = ""
 
   def has_sender(self): return self.has_sender_
 
@@ -242,8 +245,9 @@
     self.replyto_ = x
 
   def clear_replyto(self):
-    self.has_replyto_ = 0
-    self.replyto_ = ""
+    if self.has_replyto_:
+      self.has_replyto_ = 0
+      self.replyto_ = ""
 
   def has_replyto(self): return self.has_replyto_
 
@@ -299,8 +303,9 @@
     self.subject_ = x
 
   def clear_subject(self):
-    self.has_subject_ = 0
-    self.subject_ = ""
+    if self.has_subject_:
+      self.has_subject_ = 0
+      self.subject_ = ""
 
   def has_subject(self): return self.has_subject_
 
@@ -311,8 +316,9 @@
     self.textbody_ = x
 
   def clear_textbody(self):
-    self.has_textbody_ = 0
-    self.textbody_ = ""
+    if self.has_textbody_:
+      self.has_textbody_ = 0
+      self.textbody_ = ""
 
   def has_textbody(self): return self.has_textbody_
 
@@ -323,8 +329,9 @@
     self.htmlbody_ = x
 
   def clear_htmlbody(self):
-    self.has_htmlbody_ = 0
-    self.htmlbody_ = ""
+    if self.has_htmlbody_:
+      self.has_htmlbody_ = 0
+      self.htmlbody_ = ""
 
   def has_htmlbody(self): return self.has_htmlbody_
 
--- a/thirdparty/google_appengine/google/appengine/api/mail_stub.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/mail_stub.py	Thu Feb 12 12:30:36 2009 +0000
@@ -196,6 +196,8 @@
     if self._smtp_host and self._enable_sendmail:
       log('Both SMTP and sendmail are enabled.  Ignoring sendmail.')
 
+    import email
+
     mime_message = mail.MailMessageToMIMEMessage(request)
     if self._smtp_host:
       self._SendSMTP(mime_message, smtp_lib)
--- a/thirdparty/google_appengine/google/appengine/api/memcache/memcache_service_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/memcache/memcache_service_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -189,8 +189,9 @@
     self.key_ = x
 
   def clear_key(self):
-    self.has_key_ = 0
-    self.key_ = ""
+    if self.has_key_:
+      self.has_key_ = 0
+      self.key_ = ""
 
   def has_key(self): return self.has_key_
 
@@ -201,8 +202,9 @@
     self.value_ = x
 
   def clear_value(self):
-    self.has_value_ = 0
-    self.value_ = ""
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = ""
 
   def has_value(self): return self.has_value_
 
@@ -213,8 +215,9 @@
     self.flags_ = x
 
   def clear_flags(self):
-    self.has_flags_ = 0
-    self.flags_ = 0
+    if self.has_flags_:
+      self.has_flags_ = 0
+      self.flags_ = 0
 
   def has_flags(self): return self.has_flags_
 
@@ -418,8 +421,9 @@
     self.key_ = x
 
   def clear_key(self):
-    self.has_key_ = 0
-    self.key_ = ""
+    if self.has_key_:
+      self.has_key_ = 0
+      self.key_ = ""
 
   def has_key(self): return self.has_key_
 
@@ -430,8 +434,9 @@
     self.value_ = x
 
   def clear_value(self):
-    self.has_value_ = 0
-    self.value_ = ""
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = ""
 
   def has_value(self): return self.has_value_
 
@@ -442,8 +447,9 @@
     self.flags_ = x
 
   def clear_flags(self):
-    self.has_flags_ = 0
-    self.flags_ = 0
+    if self.has_flags_:
+      self.has_flags_ = 0
+      self.flags_ = 0
 
   def has_flags(self): return self.has_flags_
 
@@ -454,8 +460,9 @@
     self.set_policy_ = x
 
   def clear_set_policy(self):
-    self.has_set_policy_ = 0
-    self.set_policy_ = 1
+    if self.has_set_policy_:
+      self.has_set_policy_ = 0
+      self.set_policy_ = 1
 
   def has_set_policy(self): return self.has_set_policy_
 
@@ -466,8 +473,9 @@
     self.expiration_time_ = x
 
   def clear_expiration_time(self):
-    self.has_expiration_time_ = 0
-    self.expiration_time_ = 0
+    if self.has_expiration_time_:
+      self.has_expiration_time_ = 0
+      self.expiration_time_ = 0
 
   def has_expiration_time(self): return self.has_expiration_time_
 
@@ -811,8 +819,9 @@
     self.key_ = x
 
   def clear_key(self):
-    self.has_key_ = 0
-    self.key_ = ""
+    if self.has_key_:
+      self.has_key_ = 0
+      self.key_ = ""
 
   def has_key(self): return self.has_key_
 
@@ -823,8 +832,9 @@
     self.delete_time_ = x
 
   def clear_delete_time(self):
-    self.has_delete_time_ = 0
-    self.delete_time_ = 0
+    if self.has_delete_time_:
+      self.has_delete_time_ = 0
+      self.delete_time_ = 0
 
   def has_delete_time(self): return self.has_delete_time_
 
@@ -1115,8 +1125,9 @@
     self.key_ = x
 
   def clear_key(self):
-    self.has_key_ = 0
-    self.key_ = ""
+    if self.has_key_:
+      self.has_key_ = 0
+      self.key_ = ""
 
   def has_key(self): return self.has_key_
 
@@ -1127,8 +1138,9 @@
     self.delta_ = x
 
   def clear_delta(self):
-    self.has_delta_ = 0
-    self.delta_ = 1
+    if self.has_delta_:
+      self.has_delta_ = 0
+      self.delta_ = 1
 
   def has_delta(self): return self.has_delta_
 
@@ -1139,8 +1151,9 @@
     self.direction_ = x
 
   def clear_direction(self):
-    self.has_direction_ = 0
-    self.direction_ = 1
+    if self.has_direction_:
+      self.has_direction_ = 0
+      self.direction_ = 1
 
   def has_direction(self): return self.has_direction_
 
@@ -1251,8 +1264,9 @@
     self.new_value_ = x
 
   def clear_new_value(self):
-    self.has_new_value_ = 0
-    self.new_value_ = 0
+    if self.has_new_value_:
+      self.has_new_value_ = 0
+      self.new_value_ = 0
 
   def has_new_value(self): return self.has_new_value_
 
@@ -1488,8 +1502,9 @@
     self.hits_ = x
 
   def clear_hits(self):
-    self.has_hits_ = 0
-    self.hits_ = 0
+    if self.has_hits_:
+      self.has_hits_ = 0
+      self.hits_ = 0
 
   def has_hits(self): return self.has_hits_
 
@@ -1500,8 +1515,9 @@
     self.misses_ = x
 
   def clear_misses(self):
-    self.has_misses_ = 0
-    self.misses_ = 0
+    if self.has_misses_:
+      self.has_misses_ = 0
+      self.misses_ = 0
 
   def has_misses(self): return self.has_misses_
 
@@ -1512,8 +1528,9 @@
     self.byte_hits_ = x
 
   def clear_byte_hits(self):
-    self.has_byte_hits_ = 0
-    self.byte_hits_ = 0
+    if self.has_byte_hits_:
+      self.has_byte_hits_ = 0
+      self.byte_hits_ = 0
 
   def has_byte_hits(self): return self.has_byte_hits_
 
@@ -1524,8 +1541,9 @@
     self.items_ = x
 
   def clear_items(self):
-    self.has_items_ = 0
-    self.items_ = 0
+    if self.has_items_:
+      self.has_items_ = 0
+      self.items_ = 0
 
   def has_items(self): return self.has_items_
 
@@ -1536,8 +1554,9 @@
     self.bytes_ = x
 
   def clear_bytes(self):
-    self.has_bytes_ = 0
-    self.bytes_ = 0
+    if self.has_bytes_:
+      self.has_bytes_ = 0
+      self.bytes_ = 0
 
   def has_bytes(self): return self.has_bytes_
 
@@ -1548,8 +1567,9 @@
     self.oldest_item_age_ = x
 
   def clear_oldest_item_age(self):
-    self.has_oldest_item_age_ = 0
-    self.oldest_item_age_ = 0
+    if self.has_oldest_item_age_:
+      self.has_oldest_item_age_ = 0
+      self.oldest_item_age_ = 0
 
   def has_oldest_item_age(self): return self.has_oldest_item_age_
 
@@ -1728,8 +1748,9 @@
   def mutable_stats(self): self.has_stats_ = 1; return self.stats()
 
   def clear_stats(self):
-    self.has_stats_ = 0;
-    if self.stats_ is not None: self.stats_.Clear()
+    if self.has_stats_:
+      self.has_stats_ = 0;
+      if self.stats_ is not None: self.stats_.Clear()
 
   def has_stats(self): return self.has_stats_
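 
 Every clear_*() accessor touched in the hunks above (and in the other generated protocol-buffer modules later in this changeset) switches to the same guard: the backing field is reset only when its has_* flag is set, so clearing an already-clear field becomes a no-op. A minimal sketch of the pattern, written here for illustration rather than copied from any one file:
 
     def clear_value(self):
       # Reset only when the field was actually set; clearing an
       # unset field leaves the message untouched.
       if self.has_value_:
         self.has_value_ = 0
         self.value_ = ""   # the field's default value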
 
--- a/thirdparty/google_appengine/google/appengine/api/memcache/memcache_stub.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/memcache/memcache_stub.py	Thu Feb 12 12:30:36 2009 +0000
@@ -223,9 +223,11 @@
 
     try:
       old_value = long(entry.value)
+      if old_value < 0:
+        raise ValueError
     except ValueError, e:
       logging.error('Increment/decrement failed: Could not interpret '
-                    'value for key = "%s" as an integer.', key)
+                    'value for key = "%s" as an unsigned integer.', key)
       return
 
     delta = request.delta()
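 
 The stub change above makes increment/decrement reject stored values that do not parse as unsigned integers, matching the service's unsigned-counter semantics. A small illustrative sketch against the public memcache API (assumes the dev_appserver stubs are registered; key names are placeholders):
 
     from google.appengine.api import memcache
 
     memcache.set('hits', 0)
     memcache.incr('hits')      # -> 1
 
     memcache.set('bad', -5)
     # With this change the stub refuses the increment: it logs
     # "Could not interpret value ... as an unsigned integer." and
     # the call returns None instead of wrapping around.
     memcache.incr('bad')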
--- a/thirdparty/google_appengine/google/appengine/api/urlfetch_service_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/urlfetch_service_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -109,8 +109,9 @@
     self.key_ = x
 
   def clear_key(self):
-    self.has_key_ = 0
-    self.key_ = ""
+    if self.has_key_:
+      self.has_key_ = 0
+      self.key_ = ""
 
   def has_key(self): return self.has_key_
 
@@ -121,8 +122,9 @@
     self.value_ = x
 
   def clear_value(self):
-    self.has_value_ = 0
-    self.value_ = ""
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = ""
 
   def has_value(self): return self.has_value_
 
@@ -227,8 +229,9 @@
     self.method_ = x
 
   def clear_method(self):
-    self.has_method_ = 0
-    self.method_ = 0
+    if self.has_method_:
+      self.has_method_ = 0
+      self.method_ = 0
 
   def has_method(self): return self.has_method_
 
@@ -239,8 +242,9 @@
     self.url_ = x
 
   def clear_url(self):
-    self.has_url_ = 0
-    self.url_ = ""
+    if self.has_url_:
+      self.has_url_ = 0
+      self.url_ = ""
 
   def has_url(self): return self.has_url_
 
@@ -267,8 +271,9 @@
     self.payload_ = x
 
   def clear_payload(self):
-    self.has_payload_ = 0
-    self.payload_ = ""
+    if self.has_payload_:
+      self.has_payload_ = 0
+      self.payload_ = ""
 
   def has_payload(self): return self.has_payload_
 
@@ -279,8 +284,9 @@
     self.followredirects_ = x
 
   def clear_followredirects(self):
-    self.has_followredirects_ = 0
-    self.followredirects_ = 1
+    if self.has_followredirects_:
+      self.has_followredirects_ = 0
+      self.followredirects_ = 1
 
   def has_followredirects(self): return self.has_followredirects_
 
@@ -448,8 +454,9 @@
     self.key_ = x
 
   def clear_key(self):
-    self.has_key_ = 0
-    self.key_ = ""
+    if self.has_key_:
+      self.has_key_ = 0
+      self.key_ = ""
 
   def has_key(self): return self.has_key_
 
@@ -460,8 +467,9 @@
     self.value_ = x
 
   def clear_value(self):
-    self.has_value_ = 0
-    self.value_ = ""
+    if self.has_value_:
+      self.has_value_ = 0
+      self.value_ = ""
 
   def has_value(self): return self.has_value_
 
@@ -546,8 +554,9 @@
     self.content_ = x
 
   def clear_content(self):
-    self.has_content_ = 0
-    self.content_ = ""
+    if self.has_content_:
+      self.has_content_ = 0
+      self.content_ = ""
 
   def has_content(self): return self.has_content_
 
@@ -558,8 +567,9 @@
     self.statuscode_ = x
 
   def clear_statuscode(self):
-    self.has_statuscode_ = 0
-    self.statuscode_ = 0
+    if self.has_statuscode_:
+      self.has_statuscode_ = 0
+      self.statuscode_ = 0
 
   def has_statuscode(self): return self.has_statuscode_
 
@@ -586,8 +596,9 @@
     self.contentwastruncated_ = x
 
   def clear_contentwastruncated(self):
-    self.has_contentwastruncated_ = 0
-    self.contentwastruncated_ = 0
+    if self.has_contentwastruncated_:
+      self.has_contentwastruncated_ = 0
+      self.contentwastruncated_ = 0
 
   def has_contentwastruncated(self): return self.has_contentwastruncated_
 
--- a/thirdparty/google_appengine/google/appengine/api/user_service_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/user_service_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -27,10 +27,12 @@
 
   OK           =    0
   REDIRECT_URL_TOO_LONG =    1
+  NOT_ALLOWED  =    2
 
   _ErrorCode_NAMES = {
     0: "OK",
     1: "REDIRECT_URL_TOO_LONG",
+    2: "NOT_ALLOWED",
   }
 
   def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
--- a/thirdparty/google_appengine/google/appengine/api/users.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/users.py	Thu Feb 12 12:30:36 2009 +0000
@@ -20,9 +20,9 @@
 Classes defined here:
   User: object representing a user.
   Error: base exception type
-  BadRequestError: UserService exception
   UserNotFoundError: UserService exception
-  BackendError: UserService exception
+  RedirectTooLongError: UserService exception
+  NotAllowedError: UserService exception
 """
 
 
@@ -50,6 +50,10 @@
   """Raised by UserService calls if the generated redirect URL was too long.
   """
 
+class NotAllowedError(Error):
+  """Raised by UserService calls if the requested redirect URL is not allowed.
+  """
+
 
 class User(object):
   """A user.
@@ -147,6 +151,9 @@
     if (e.application_error ==
         user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG):
       raise RedirectTooLongError
+    elif (e.application_error ==
+        user_service_pb.UserServiceError.NOT_ALLOWED):
+      raise NotAllowedError
     else:
       raise e
   return resp.value()
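 
 With the new NOT_ALLOWED code mapped to NotAllowedError, callers can distinguish a disallowed redirect target from an over-long one. A hedged sketch using the existing public create_login_url call (the destination paths are placeholders):
 
     from google.appengine.api import users
 
     try:
       url = users.create_login_url('/after-login')
     except users.RedirectTooLongError:
       # Generated redirect URL was too long; fall back to the root page.
       url = users.create_login_url('/')
     except users.NotAllowedError:
       # The requested redirect target is not allowed for this app.
       url = '/'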
--- a/thirdparty/google_appengine/google/appengine/base/capabilities_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/base/capabilities_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -59,8 +59,9 @@
   def mutable_default_config(self): self.has_default_config_ = 1; return self.default_config()
 
   def clear_default_config(self):
-    self.has_default_config_ = 0;
-    if self.default_config_ is not None: self.default_config_.Clear()
+    if self.has_default_config_:
+      self.has_default_config_ = 0;
+      if self.default_config_ is not None: self.default_config_.Clear()
 
   def has_default_config(self): return self.has_default_config_
 
@@ -203,8 +204,9 @@
     self.package_ = x
 
   def clear_package(self):
-    self.has_package_ = 0
-    self.package_ = ""
+    if self.has_package_:
+      self.has_package_ = 0
+      self.package_ = ""
 
   def has_package(self): return self.has_package_
 
@@ -215,8 +217,9 @@
     self.capability_ = x
 
   def clear_capability(self):
-    self.has_capability_ = 0
-    self.capability_ = ""
+    if self.has_capability_:
+      self.has_capability_ = 0
+      self.capability_ = ""
 
   def has_capability(self): return self.has_capability_
 
@@ -227,8 +230,9 @@
     self.status_ = x
 
   def clear_status(self):
-    self.has_status_ = 0
-    self.status_ = 4
+    if self.has_status_:
+      self.has_status_ = 0
+      self.status_ = 4
 
   def has_status(self): return self.has_status_
 
@@ -239,8 +243,9 @@
     self.scheduled_time_ = x
 
   def clear_scheduled_time(self):
-    self.has_scheduled_time_ = 0
-    self.scheduled_time_ = ""
+    if self.has_scheduled_time_:
+      self.has_scheduled_time_ = 0
+      self.scheduled_time_ = ""
 
   def has_scheduled_time(self): return self.has_scheduled_time_
 
@@ -251,8 +256,9 @@
     self.internal_message_ = x
 
   def clear_internal_message(self):
-    self.has_internal_message_ = 0
-    self.internal_message_ = ""
+    if self.has_internal_message_:
+      self.has_internal_message_ = 0
+      self.internal_message_ = ""
 
   def has_internal_message(self): return self.has_internal_message_
 
@@ -263,8 +269,9 @@
     self.admin_message_ = x
 
   def clear_admin_message(self):
-    self.has_admin_message_ = 0
-    self.admin_message_ = ""
+    if self.has_admin_message_:
+      self.has_admin_message_ = 0
+      self.admin_message_ = ""
 
   def has_admin_message(self): return self.has_admin_message_
 
@@ -275,8 +282,9 @@
     self.error_message_ = x
 
   def clear_error_message(self):
-    self.has_error_message_ = 0
-    self.error_message_ = ""
+    if self.has_error_message_:
+      self.has_error_message_ = 0
+      self.error_message_ = ""
 
   def has_error_message(self): return self.has_error_message_
 
--- a/thirdparty/google_appengine/google/appengine/cron/groctimespecification.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/cron/groctimespecification.py	Thu Feb 12 12:30:36 2009 +0000
@@ -22,25 +22,35 @@
 module takes a parsed schedule (produced by Antlr) and creates objects that
 can produce times that match this schedule.
 
-A parsed schedule is one of two types - an Interval, and a Specific Time.
+A parsed schedule is one of two types - an Interval or a Specific Time.
 See the class docstrings for more.
 
 Extensions to be considered:
 
   allowing a comma separated list of times to run
   allowing the user to specify particular days of the month to run
-
 """
 
 
 import calendar
 import datetime
 
+try:
+  import pytz
+except ImportError:
+  pytz = None
+
 import groc
 
 HOURS = 'hours'
 MINUTES = 'minutes'
 
+try:
+  from pytz import NonExistentTimeError
+except ImportError:
+  class NonExistentTimeError(Exception):
+    pass
+
 
 def GrocTimeSpecification(schedule):
   """Factory function.
@@ -53,7 +63,6 @@
   Returns:
     a TimeSpecification instance
   """
-
   parser = groc.CreateParser(schedule)
   parser.timespec()
 
@@ -71,7 +80,7 @@
     """Returns the next n times that match the schedule, starting at time start.
 
     Arguments:
-      start: a datetime to start from. Matches will start from after this time
+      start: a datetime to start from. Matches will start from after this time.
       n:     the number of matching times to return
 
     Returns:
@@ -89,7 +98,7 @@
     Must be implemented in subclasses.
 
     Arguments:
-      start: a datetime to start with. Matches will start from this time
+      start: a datetime to start with. Matches will start from this time.
 
     Returns:
       a datetime object
@@ -100,13 +109,14 @@
 class IntervalTimeSpecification(TimeSpecification):
   """A time specification for a given interval.
 
-  An Interval type spec runs at the given fixed interval. They have two
+  An Interval type spec runs at the given fixed interval. It has these
   attributes:
   period   - the type of interval, either "hours" or "minutes"
   interval - the number of units of type period.
+  timezone - the timezone for this specification. Not used in this class.
   """
 
-  def __init__(self, interval, period):
+  def __init__(self, interval, period, timezone=None):
     super(IntervalTimeSpecification, self).__init__(self)
     self.interval = interval
     self.period = period
@@ -115,7 +125,7 @@
     """Returns the next match after time 't'.
 
     Arguments:
-      t: a datetime to start from. Matches will start from after this time
+      t: a datetime to start from. Matches will start from after this time.
 
     Returns:
       a datetime object
@@ -129,47 +139,55 @@
 class SpecificTimeSpecification(TimeSpecification):
   """Specific time specification.
 
-  A Specific interval is more complex, but define a certain time to run, on
-  given days. They have the following attributes:
+  A Specific interval is more complex, but defines a certain time to run and
+  the days that it should run. It has the following attributes:
   time     - the time of day to run, as "HH:MM"
   ordinals - first, second, third &c, as a set of integers in 1..5
-  months   - the months that this is valid, as a set of integers in 1..12
-  weekdays - the days of the week to run this, 0=Sunday, 6=Saturday.
+  months   - the months that this should run, as a set of integers in 1..12
+  weekdays - the days of the week that this should run, as a set of integers,
+             0=Sunday, 6=Saturday
+  timezone - the optional timezone as a string for this specification.
+             Defaults to UTC - valid entries are things like Australia/Victoria
+             or PST8PDT.
 
-  The specific time interval can be quite complex. A schedule could look like
+  A specific time schedule can be quite complex. A schedule could look like
   this:
   "1st,third sat,sun of jan,feb,mar 09:15"
 
-  In this case, ordinals would be [1,3], weekdays [0,6], months [1,2,3] and time
-  would be "09:15".
+  In this case, ordinals would be {1,3}, weekdays {0,6}, months {1,2,3} and
+  time would be "09:15".
   """
 
+  timezone = None
+
   def __init__(self, ordinals=None, weekdays=None, months=None, monthdays=None,
-               timestr='00:00'):
+               timestr='00:00', timezone=None):
     super(SpecificTimeSpecification, self).__init__(self)
-    if weekdays and monthdays:
+    if weekdays is not None and monthdays is not None:
       raise ValueError("can't supply both monthdays and weekdays")
     if ordinals is None:
       self.ordinals = set(range(1, 6))
     else:
-      self.ordinals = ordinals
+      self.ordinals = set(ordinals)
 
     if weekdays is None:
       self.weekdays = set(range(7))
     else:
-      self.weekdays = weekdays
+      self.weekdays = set(weekdays)
 
     if months is None:
       self.months = set(range(1, 13))
     else:
-      self.months = months
+      self.months = set(months)
 
     if monthdays is None:
       self.monthdays = set()
     else:
-      self.monthdays = monthdays
+      self.monthdays = set(monthdays)
     hourstr, minutestr = timestr.split(':')
     self.time = datetime.time(int(hourstr), int(minutestr))
+    if timezone and pytz is not None:
+      self.timezone = pytz.timezone(timezone)
 
   def _MatchingDays(self, year, month):
     """Returns matching days for the given year and month.
@@ -225,33 +243,53 @@
     """Returns the next time that matches the schedule after time start.
 
     Arguments:
-      start: a datetime to start with. Matches will start after this time
+      start: a UTC datetime to start from. Matches will start after this time
 
     Returns:
       a datetime object
     """
     start_time = start
+    if self.timezone and pytz is not None:
+      if not start_time.tzinfo:
+        start_time = pytz.utc.localize(start_time)
+      start_time = start_time.astimezone(self.timezone)
+      start_time = start_time.replace(tzinfo=None)
     if self.months:
-      months = self._NextMonthGenerator(start.month, self.months)
+      months = self._NextMonthGenerator(start_time.month, self.months)
     while True:
       month, yearwraps = months.next()
-      candidate = start_time.replace(day=1, month=month,
+      candidate_month = start_time.replace(day=1, month=month,
                                      year=start_time.year + yearwraps)
 
       if self.monthdays:
-        _, last_day = calendar.monthrange(candidate.year, candidate.month)
-        day_matches = sorted([x for x in self.monthdays if x <= last_day])
+        _, last_day = calendar.monthrange(candidate_month.year,
+                                          candidate_month.month)
+        day_matches = sorted(x for x in self.monthdays if x <= last_day)
       else:
-        day_matches = self._MatchingDays(candidate.year, month)
+        day_matches = self._MatchingDays(candidate_month.year, month)
 
-      if ((candidate.year, candidate.month)
+      if ((candidate_month.year, candidate_month.month)
           == (start_time.year, start_time.month)):
         day_matches = [x for x in day_matches if x >= start_time.day]
-        if day_matches and day_matches[0] == start_time.day:
-          if start_time.time() >= self.time:
-            day_matches.pop(0)
-      if not day_matches:
-        continue
-      out = candidate.replace(day=day_matches[0], hour=self.time.hour,
-                              minute=self.time.minute, second=0, microsecond=0)
-      return out
+        while (day_matches and day_matches[0] == start_time.day
+            and start_time.time() >= self.time):
+          day_matches.pop(0)
+      while day_matches:
+        out = candidate_month.replace(day=day_matches[0], hour=self.time.hour,
+                                      minute=self.time.minute, second=0,
+                                      microsecond=0)
+        if self.timezone and pytz is not None:
+          try:
+            out = self.timezone.localize(out)
+          except (NonExistentTimeError, IndexError):
+            for _ in range(24):
+              out = out.replace(minute=1) + datetime.timedelta(minutes=60)
+              try:
+                out = self.timezone.localize(out)
+              except (NonExistentTimeError, IndexError):
+                continue
+              break
+          out = out.astimezone(pytz.utc)
+        return out
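 
 The class docstring's example schedule maps directly onto the constructor arguments. A short sketch with values taken from that docstring (the timezone string is an illustrative pytz name and is only honoured when pytz is importable; without pytz the specification stays in UTC):
 
     import datetime
     from google.appengine.cron import groctimespecification
 
     # "1st,third sat,sun of jan,feb,mar 09:15"
     spec = groctimespecification.SpecificTimeSpecification(
         ordinals=[1, 3], weekdays=[0, 6], months=[1, 2, 3],
         timestr='09:15', timezone='America/Los_Angeles')
 
     start = datetime.datetime(2009, 1, 1)
     matches = spec.GetMatches(start, 4)   # next four matching datetimes
 
     # The cron machinery reaches an equivalent object via the parser:
     # groctimespecification.GrocTimeSpecification(
     #     '1st,third sat,sun of jan,feb,mar 09:15')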
--- a/thirdparty/google_appengine/google/appengine/datastore/datastore_index.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/datastore/datastore_index.py	Thu Feb 12 12:30:36 2009 +0000
@@ -328,8 +328,7 @@
     else:
       props.append((prop_name, ASCENDING))
 
-  if (kind and not ancestor and
-      (not props or (len(props) == 1 and props[0][1] == ASCENDING))):
+  if kind and not ancestor and len(props) <= 1:
     required = False
 
     if props:
--- a/thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -44,8 +44,9 @@
     self.handle_ = x
 
   def clear_handle(self):
-    self.has_handle_ = 0
-    self.handle_ = 0
+    if self.has_handle_:
+      self.has_handle_ = 0
+      self.handle_ = 0
 
   def has_handle(self): return self.has_handle_
 
@@ -146,8 +147,9 @@
     self.op_ = x
 
   def clear_op(self):
-    self.has_op_ = 0
-    self.op_ = 0
+    if self.has_op_:
+      self.has_op_ = 0
+      self.op_ = 0
 
   def has_op(self): return self.has_op_
 
@@ -269,8 +271,9 @@
     self.property_ = x
 
   def clear_property(self):
-    self.has_property_ = 0
-    self.property_ = ""
+    if self.has_property_:
+      self.has_property_ = 0
+      self.property_ = ""
 
   def has_property(self): return self.has_property_
 
@@ -281,8 +284,9 @@
     self.direction_ = x
 
   def clear_direction(self):
-    self.has_direction_ = 0
-    self.direction_ = 1
+    if self.has_direction_:
+      self.has_direction_ = 0
+      self.direction_ = 1
 
   def has_direction(self): return self.has_direction_
 
@@ -391,8 +395,9 @@
     self.app_ = x
 
   def clear_app(self):
-    self.has_app_ = 0
-    self.app_ = ""
+    if self.has_app_:
+      self.has_app_ = 0
+      self.app_ = ""
 
   def has_app(self): return self.has_app_
 
@@ -403,8 +408,9 @@
     self.kind_ = x
 
   def clear_kind(self):
-    self.has_kind_ = 0
-    self.kind_ = ""
+    if self.has_kind_:
+      self.has_kind_ = 0
+      self.kind_ = ""
 
   def has_kind(self): return self.has_kind_
 
@@ -420,8 +426,9 @@
   def mutable_ancestor(self): self.has_ancestor_ = 1; return self.ancestor()
 
   def clear_ancestor(self):
-    self.has_ancestor_ = 0;
-    if self.ancestor_ is not None: self.ancestor_.Clear()
+    if self.has_ancestor_:
+      self.has_ancestor_ = 0;
+      if self.ancestor_ is not None: self.ancestor_.Clear()
 
   def has_ancestor(self): return self.has_ancestor_
 
@@ -448,8 +455,9 @@
     self.search_query_ = x
 
   def clear_search_query(self):
-    self.has_search_query_ = 0
-    self.search_query_ = ""
+    if self.has_search_query_:
+      self.has_search_query_ = 0
+      self.search_query_ = ""
 
   def has_search_query(self): return self.has_search_query_
 
@@ -476,8 +484,9 @@
     self.hint_ = x
 
   def clear_hint(self):
-    self.has_hint_ = 0
-    self.hint_ = 0
+    if self.has_hint_:
+      self.has_hint_ = 0
+      self.hint_ = 0
 
   def has_hint(self): return self.has_hint_
 
@@ -488,8 +497,9 @@
     self.offset_ = x
 
   def clear_offset(self):
-    self.has_offset_ = 0
-    self.offset_ = 0
+    if self.has_offset_:
+      self.has_offset_ = 0
+      self.offset_ = 0
 
   def has_offset(self): return self.has_offset_
 
@@ -500,8 +510,9 @@
     self.limit_ = x
 
   def clear_limit(self):
-    self.has_limit_ = 0
-    self.limit_ = 0
+    if self.has_limit_:
+      self.has_limit_ = 0
+      self.limit_ = 0
 
   def has_limit(self): return self.has_limit_
 
@@ -528,8 +539,9 @@
     self.require_perfect_plan_ = x
 
   def clear_require_perfect_plan(self):
-    self.has_require_perfect_plan_ = 0
-    self.require_perfect_plan_ = 0
+    if self.has_require_perfect_plan_:
+      self.has_require_perfect_plan_ = 0
+      self.require_perfect_plan_ = 0
 
   def has_require_perfect_plan(self): return self.has_require_perfect_plan_
 
@@ -851,8 +863,9 @@
     self.native_ancestor_ = x
 
   def clear_native_ancestor(self):
-    self.has_native_ancestor_ = 0
-    self.native_ancestor_ = 0
+    if self.has_native_ancestor_:
+      self.has_native_ancestor_ = 0
+      self.native_ancestor_ = 0
 
   def has_native_ancestor(self): return self.has_native_ancestor_
 
@@ -879,8 +892,9 @@
     self.native_offset_ = x
 
   def clear_native_offset(self):
-    self.has_native_offset_ = 0
-    self.native_offset_ = 0
+    if self.has_native_offset_:
+      self.has_native_offset_ = 0
+      self.native_offset_ = 0
 
   def has_native_offset(self): return self.has_native_offset_
 
@@ -891,8 +905,9 @@
     self.native_limit_ = x
 
   def clear_native_limit(self):
-    self.has_native_limit_ = 0
-    self.native_limit_ = 0
+    if self.has_native_limit_:
+      self.has_native_limit_ = 0
+      self.native_limit_ = 0
 
   def has_native_limit(self): return self.has_native_limit_
 
@@ -1031,8 +1046,9 @@
     self.cursor_ = x
 
   def clear_cursor(self):
-    self.has_cursor_ = 0
-    self.cursor_ = 0
+    if self.has_cursor_:
+      self.has_cursor_ = 0
+      self.cursor_ = 0
 
   def has_cursor(self): return self.has_cursor_
 
@@ -1178,8 +1194,9 @@
     self.index_writes_ = x
 
   def clear_index_writes(self):
-    self.has_index_writes_ = 0
-    self.index_writes_ = 0
+    if self.has_index_writes_:
+      self.has_index_writes_ = 0
+      self.index_writes_ = 0
 
   def has_index_writes(self): return self.has_index_writes_
 
@@ -1278,8 +1295,9 @@
   def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()
 
   def clear_transaction(self):
-    self.has_transaction_ = 0;
-    if self.transaction_ is not None: self.transaction_.Clear()
+    if self.has_transaction_:
+      self.has_transaction_ = 0;
+      if self.transaction_ is not None: self.transaction_.Clear()
 
   def has_transaction(self): return self.has_transaction_
 
@@ -1400,8 +1418,9 @@
   def mutable_entity(self): self.has_entity_ = 1; return self.entity()
 
   def clear_entity(self):
-    self.has_entity_ = 0;
-    if self.entity_ is not None: self.entity_.Clear()
+    if self.has_entity_:
+      self.has_entity_ = 0;
+      if self.entity_ is not None: self.entity_.Clear()
 
   def has_entity(self): return self.has_entity_
 
@@ -1556,6 +1575,8 @@
 class PutRequest(ProtocolBuffer.ProtocolMessage):
   has_transaction_ = 0
   transaction_ = None
+  has_trusted_ = 0
+  trusted_ = 0
 
   def __init__(self, contents=None):
     self.entity_ = []
@@ -1591,8 +1612,9 @@
   def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()
 
   def clear_transaction(self):
-    self.has_transaction_ = 0;
-    if self.transaction_ is not None: self.transaction_.Clear()
+    if self.has_transaction_:
+      self.has_transaction_ = 0;
+      if self.transaction_ is not None: self.transaction_.Clear()
 
   def has_transaction(self): return self.has_transaction_
 
@@ -1612,12 +1634,26 @@
 
   def clear_composite_index(self):
     self.composite_index_ = []
+  def trusted(self): return self.trusted_
+
+  def set_trusted(self, x):
+    self.has_trusted_ = 1
+    self.trusted_ = x
+
+  def clear_trusted(self):
+    if self.has_trusted_:
+      self.has_trusted_ = 0
+      self.trusted_ = 0
+
+  def has_trusted(self): return self.has_trusted_
+
 
   def MergeFrom(self, x):
     assert x is not self
     for i in xrange(x.entity_size()): self.add_entity().CopyFrom(x.entity(i))
     if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
     for i in xrange(x.composite_index_size()): self.add_composite_index().CopyFrom(x.composite_index(i))
+    if (x.has_trusted()): self.set_trusted(x.trusted())
 
   def Equals(self, x):
     if x is self: return 1
@@ -1629,6 +1665,8 @@
     if len(self.composite_index_) != len(x.composite_index_): return 0
     for e1, e2 in zip(self.composite_index_, x.composite_index_):
       if e1 != e2: return 0
+    if self.has_trusted_ != x.has_trusted_: return 0
+    if self.has_trusted_ and self.trusted_ != x.trusted_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -1647,12 +1685,14 @@
     if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSize())
     n += 1 * len(self.composite_index_)
     for i in xrange(len(self.composite_index_)): n += self.lengthString(self.composite_index_[i].ByteSize())
+    if (self.has_trusted_): n += 2
     return n + 0
 
   def Clear(self):
     self.clear_entity()
     self.clear_transaction()
     self.clear_composite_index()
+    self.clear_trusted()
 
   def OutputUnchecked(self, out):
     for i in xrange(len(self.entity_)):
@@ -1667,6 +1707,9 @@
       out.putVarInt32(26)
       out.putVarInt32(self.composite_index_[i].ByteSize())
       self.composite_index_[i].OutputUnchecked(out)
+    if (self.has_trusted_):
+      out.putVarInt32(32)
+      out.putBoolean(self.trusted_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -1689,6 +1732,9 @@
         d.skip(length)
         self.add_composite_index().TryMerge(tmp)
         continue
+      if tt == 32:
+        self.set_trusted(d.getBoolean())
+        continue
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
       d.skipData(tt)
 
@@ -1715,17 +1761,20 @@
       res+=e.__str__(prefix + "  ", printElemNumber)
       res+=prefix+">\n"
       cnt+=1
+    if self.has_trusted_: res+=prefix+("trusted: %s\n" % self.DebugFormatBool(self.trusted_))
     return res
 
   kentity = 1
   ktransaction = 2
   kcomposite_index = 3
+  ktrusted = 4
 
   _TEXT = (
    "ErrorCode",
    "entity",
    "transaction",
    "composite_index",
+   "trusted",
   )
 
   _TYPES = (
@@ -1736,6 +1785,8 @@
 
    ProtocolBuffer.Encoder.STRING,
 
+   ProtocolBuffer.Encoder.NUMERIC,
+
   )
 
   _STYLE = """"""
@@ -1777,8 +1828,9 @@
   def mutable_cost(self): self.has_cost_ = 1; return self.cost()
 
   def clear_cost(self):
-    self.has_cost_ = 0;
-    if self.cost_ is not None: self.cost_.Clear()
+    if self.has_cost_:
+      self.has_cost_ = 0;
+      if self.cost_ is not None: self.cost_.Clear()
 
   def has_cost(self): return self.has_cost_
 
@@ -1882,6 +1934,8 @@
 class DeleteRequest(ProtocolBuffer.ProtocolMessage):
   has_transaction_ = 0
   transaction_ = None
+  has_trusted_ = 0
+  trusted_ = 0
 
   def __init__(self, contents=None):
     self.key_ = []
@@ -1916,16 +1970,31 @@
   def mutable_transaction(self): self.has_transaction_ = 1; return self.transaction()
 
   def clear_transaction(self):
-    self.has_transaction_ = 0;
-    if self.transaction_ is not None: self.transaction_.Clear()
+    if self.has_transaction_:
+      self.has_transaction_ = 0;
+      if self.transaction_ is not None: self.transaction_.Clear()
 
   def has_transaction(self): return self.has_transaction_
 
+  def trusted(self): return self.trusted_
+
+  def set_trusted(self, x):
+    self.has_trusted_ = 1
+    self.trusted_ = x
+
+  def clear_trusted(self):
+    if self.has_trusted_:
+      self.has_trusted_ = 0
+      self.trusted_ = 0
+
+  def has_trusted(self): return self.has_trusted_
+
 
   def MergeFrom(self, x):
     assert x is not self
     for i in xrange(x.key_size()): self.add_key().CopyFrom(x.key(i))
     if (x.has_transaction()): self.mutable_transaction().MergeFrom(x.transaction())
+    if (x.has_trusted()): self.set_trusted(x.trusted())
 
   def Equals(self, x):
     if x is self: return 1
@@ -1934,6 +2003,8 @@
       if e1 != e2: return 0
     if self.has_transaction_ != x.has_transaction_: return 0
     if self.has_transaction_ and self.transaction_ != x.transaction_: return 0
+    if self.has_trusted_ != x.has_trusted_: return 0
+    if self.has_trusted_ and self.trusted_ != x.trusted_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -1948,13 +2019,18 @@
     n += 1 * len(self.key_)
     for i in xrange(len(self.key_)): n += self.lengthString(self.key_[i].ByteSize())
     if (self.has_transaction_): n += 1 + self.lengthString(self.transaction_.ByteSize())
+    if (self.has_trusted_): n += 2
     return n + 0
 
   def Clear(self):
     self.clear_key()
     self.clear_transaction()
+    self.clear_trusted()
 
   def OutputUnchecked(self, out):
+    if (self.has_trusted_):
+      out.putVarInt32(32)
+      out.putBoolean(self.trusted_)
     if (self.has_transaction_):
       out.putVarInt32(42)
       out.putVarInt32(self.transaction_.ByteSize())
@@ -1967,6 +2043,9 @@
   def TryMerge(self, d):
     while d.avail() > 0:
       tt = d.getVarInt32()
+      if tt == 32:
+        self.set_trusted(d.getBoolean())
+        continue
       if tt == 42:
         length = d.getVarInt32()
         tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
@@ -1997,17 +2076,19 @@
       res+=prefix+"transaction <\n"
       res+=self.transaction_.__str__(prefix + "  ", printElemNumber)
       res+=prefix+">\n"
+    if self.has_trusted_: res+=prefix+("trusted: %s\n" % self.DebugFormatBool(self.trusted_))
     return res
 
   kkey = 6
   ktransaction = 5
+  ktrusted = 4
 
   _TEXT = (
    "ErrorCode",
    None,
    None,
    None,
-   None,
+   "trusted",
    "transaction",
    "key",
   )
@@ -2020,7 +2101,7 @@
 
    ProtocolBuffer.Encoder.MAX_TYPE,
 
-   ProtocolBuffer.Encoder.MAX_TYPE,
+   ProtocolBuffer.Encoder.NUMERIC,
 
    ProtocolBuffer.Encoder.STRING,
 
@@ -2050,8 +2131,9 @@
   def mutable_cost(self): self.has_cost_ = 1; return self.cost()
 
   def clear_cost(self):
-    self.has_cost_ = 0;
-    if self.cost_ is not None: self.cost_.Clear()
+    if self.has_cost_:
+      self.has_cost_ = 0;
+      if self.cost_ is not None: self.cost_.Clear()
 
   def has_cost(self): return self.has_cost_
 
@@ -2145,8 +2227,9 @@
     self.count_ = x
 
   def clear_count(self):
-    self.has_count_ = 0
-    self.count_ = 1
+    if self.has_count_:
+      self.has_count_ = 0
+      self.count_ = 1
 
   def has_count(self): return self.has_count_
 
@@ -2258,8 +2341,9 @@
   def mutable_cursor(self): self.has_cursor_ = 1; return self.cursor()
 
   def clear_cursor(self):
-    self.has_cursor_ = 0;
-    if self.cursor_ is not None: self.cursor_.Clear()
+    if self.has_cursor_:
+      self.has_cursor_ = 0;
+      if self.cursor_ is not None: self.cursor_.Clear()
 
   def has_cursor(self): return self.has_cursor_
 
@@ -2286,8 +2370,9 @@
     self.more_results_ = x
 
   def clear_more_results(self):
-    self.has_more_results_ = 0
-    self.more_results_ = 0
+    if self.has_more_results_:
+      self.has_more_results_ = 0
+      self.more_results_ = 0
 
   def has_more_results(self): return self.has_more_results_
 
@@ -2596,5 +2681,97 @@
 
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
-
-__all__ = ['Transaction','Query','Query_Filter','Query_Order','QueryExplanation','Cursor','Error','Cost','GetRequest','GetResponse','GetResponse_Entity','PutRequest','PutResponse','DeleteRequest','DeleteResponse','NextRequest','QueryResult','Schema','CompositeIndices']
+class CommitResponse(ProtocolBuffer.ProtocolMessage):
+  has_cost_ = 0
+  cost_ = None
+
+  def __init__(self, contents=None):
+    self.lazy_init_lock_ = thread.allocate_lock()
+    if contents is not None: self.MergeFromString(contents)
+
+  def cost(self):
+    if self.cost_ is None:
+      self.lazy_init_lock_.acquire()
+      try:
+        if self.cost_ is None: self.cost_ = Cost()
+      finally:
+        self.lazy_init_lock_.release()
+    return self.cost_
+
+  def mutable_cost(self): self.has_cost_ = 1; return self.cost()
+
+  def clear_cost(self):
+    if self.has_cost_:
+      self.has_cost_ = 0;
+      if self.cost_ is not None: self.cost_.Clear()
+
+  def has_cost(self): return self.has_cost_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_cost()): self.mutable_cost().MergeFrom(x.cost())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_cost_ != x.has_cost_: return 0
+    if self.has_cost_ and self.cost_ != x.cost_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (self.has_cost_ and not self.cost_.IsInitialized(debug_strs)): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    if (self.has_cost_): n += 1 + self.lengthString(self.cost_.ByteSize())
+    return n + 0
+
+  def Clear(self):
+    self.clear_cost()
+
+  def OutputUnchecked(self, out):
+    if (self.has_cost_):
+      out.putVarInt32(10)
+      out.putVarInt32(self.cost_.ByteSize())
+      self.cost_.OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_cost().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_cost_:
+      res+=prefix+"cost <\n"
+      res+=self.cost_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+  kcost = 1
+
+  _TEXT = (
+   "ErrorCode",
+   "cost",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+
+__all__ = ['Transaction','Query','Query_Filter','Query_Order','QueryExplanation','Cursor','Error','Cost','GetRequest','GetResponse','GetResponse_Entity','PutRequest','PutResponse','DeleteRequest','DeleteResponse','NextRequest','QueryResult','Schema','CompositeIndices','CommitResponse']
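 
 The new trusted field on PutRequest/DeleteRequest and the new CommitResponse message follow the usual generated-code shape. A brief illustrative sketch exercising them (not part of the diff; the values are arbitrary):
 
     from google.appengine.datastore import datastore_pb
 
     req = datastore_pb.PutRequest()
     req.set_trusted(1)                       # new optional bool, tag 4
     assert req.has_trusted() and req.trusted()
 
     resp = datastore_pb.CommitResponse()
     resp.mutable_cost().set_index_writes(2)  # Cost is defined earlier in this file
     copy = datastore_pb.CommitResponse(resp.Encode())   # round-trip through the wire format
     assert copy.cost().index_writes() == 2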
--- a/thirdparty/google_appengine/google/appengine/datastore/entity_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/datastore/entity_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -40,8 +40,9 @@
     self.type_ = x
 
   def clear_type(self):
-    self.has_type_ = 0
-    self.type_ = ""
+    if self.has_type_:
+      self.has_type_ = 0
+      self.type_ = ""
 
   def has_type(self): return self.has_type_
 
@@ -52,8 +53,9 @@
     self.id_ = x
 
   def clear_id(self):
-    self.has_id_ = 0
-    self.id_ = 0
+    if self.has_id_:
+      self.has_id_ = 0
+      self.id_ = 0
 
   def has_id(self): return self.has_id_
 
@@ -64,8 +66,9 @@
     self.name_ = x
 
   def clear_name(self):
-    self.has_name_ = 0
-    self.name_ = ""
+    if self.has_name_:
+      self.has_name_ = 0
+      self.name_ = ""
 
   def has_name(self): return self.has_name_
 
@@ -156,8 +159,9 @@
     self.x_ = x
 
   def clear_x(self):
-    self.has_x_ = 0
-    self.x_ = 0.0
+    if self.has_x_:
+      self.has_x_ = 0
+      self.x_ = 0.0
 
   def has_x(self): return self.has_x_
 
@@ -168,8 +172,9 @@
     self.y_ = x
 
   def clear_y(self):
-    self.has_y_ = 0
-    self.y_ = 0.0
+    if self.has_y_:
+      self.has_y_ = 0
+      self.y_ = 0.0
 
   def has_y(self): return self.has_y_
 
@@ -253,8 +258,9 @@
     self.email_ = x
 
   def clear_email(self):
-    self.has_email_ = 0
-    self.email_ = ""
+    if self.has_email_:
+      self.has_email_ = 0
+      self.email_ = ""
 
   def has_email(self): return self.has_email_
 
@@ -265,8 +271,9 @@
     self.auth_domain_ = x
 
   def clear_auth_domain(self):
-    self.has_auth_domain_ = 0
-    self.auth_domain_ = ""
+    if self.has_auth_domain_:
+      self.has_auth_domain_ = 0
+      self.auth_domain_ = ""
 
   def has_auth_domain(self): return self.has_auth_domain_
 
@@ -277,8 +284,9 @@
     self.nickname_ = x
 
   def clear_nickname(self):
-    self.has_nickname_ = 0
-    self.nickname_ = ""
+    if self.has_nickname_:
+      self.has_nickname_ = 0
+      self.nickname_ = ""
 
   def has_nickname(self): return self.has_nickname_
 
@@ -289,8 +297,9 @@
     self.gaiaid_ = x
 
   def clear_gaiaid(self):
-    self.has_gaiaid_ = 0
-    self.gaiaid_ = 0
+    if self.has_gaiaid_:
+      self.has_gaiaid_ = 0
+      self.gaiaid_ = 0
 
   def has_gaiaid(self): return self.has_gaiaid_
 
@@ -398,8 +407,9 @@
     self.app_ = x
 
   def clear_app(self):
-    self.has_app_ = 0
-    self.app_ = ""
+    if self.has_app_:
+      self.has_app_ = 0
+      self.app_ = ""
 
   def has_app(self): return self.has_app_
 
@@ -517,8 +527,9 @@
     self.int64value_ = x
 
   def clear_int64value(self):
-    self.has_int64value_ = 0
-    self.int64value_ = 0
+    if self.has_int64value_:
+      self.has_int64value_ = 0
+      self.int64value_ = 0
 
   def has_int64value(self): return self.has_int64value_
 
@@ -529,8 +540,9 @@
     self.booleanvalue_ = x
 
   def clear_booleanvalue(self):
-    self.has_booleanvalue_ = 0
-    self.booleanvalue_ = 0
+    if self.has_booleanvalue_:
+      self.has_booleanvalue_ = 0
+      self.booleanvalue_ = 0
 
   def has_booleanvalue(self): return self.has_booleanvalue_
 
@@ -541,8 +553,9 @@
     self.stringvalue_ = x
 
   def clear_stringvalue(self):
-    self.has_stringvalue_ = 0
-    self.stringvalue_ = ""
+    if self.has_stringvalue_:
+      self.has_stringvalue_ = 0
+      self.stringvalue_ = ""
 
   def has_stringvalue(self): return self.has_stringvalue_
 
@@ -553,8 +566,9 @@
     self.doublevalue_ = x
 
   def clear_doublevalue(self):
-    self.has_doublevalue_ = 0
-    self.doublevalue_ = 0.0
+    if self.has_doublevalue_:
+      self.has_doublevalue_ = 0
+      self.doublevalue_ = 0.0
 
   def has_doublevalue(self): return self.has_doublevalue_
 
@@ -570,8 +584,9 @@
   def mutable_pointvalue(self): self.has_pointvalue_ = 1; return self.pointvalue()
 
   def clear_pointvalue(self):
-    self.has_pointvalue_ = 0;
-    if self.pointvalue_ is not None: self.pointvalue_.Clear()
+    if self.has_pointvalue_:
+      self.has_pointvalue_ = 0;
+      if self.pointvalue_ is not None: self.pointvalue_.Clear()
 
   def has_pointvalue(self): return self.has_pointvalue_
 
@@ -587,8 +602,9 @@
   def mutable_uservalue(self): self.has_uservalue_ = 1; return self.uservalue()
 
   def clear_uservalue(self):
-    self.has_uservalue_ = 0;
-    if self.uservalue_ is not None: self.uservalue_.Clear()
+    if self.has_uservalue_:
+      self.has_uservalue_ = 0;
+      if self.uservalue_ is not None: self.uservalue_.Clear()
 
   def has_uservalue(self): return self.has_uservalue_
 
@@ -604,8 +620,9 @@
   def mutable_referencevalue(self): self.has_referencevalue_ = 1; return self.referencevalue()
 
   def clear_referencevalue(self):
-    self.has_referencevalue_ = 0;
-    if self.referencevalue_ is not None: self.referencevalue_.Clear()
+    if self.has_referencevalue_:
+      self.has_referencevalue_ = 0;
+      if self.referencevalue_ is not None: self.referencevalue_.Clear()
 
   def has_referencevalue(self): return self.has_referencevalue_
 
@@ -884,8 +901,9 @@
     self.meaning_ = x
 
   def clear_meaning(self):
-    self.has_meaning_ = 0
-    self.meaning_ = 0
+    if self.has_meaning_:
+      self.has_meaning_ = 0
+      self.meaning_ = 0
 
   def has_meaning(self): return self.has_meaning_
 
@@ -896,8 +914,9 @@
     self.meaning_uri_ = x
 
   def clear_meaning_uri(self):
-    self.has_meaning_uri_ = 0
-    self.meaning_uri_ = ""
+    if self.has_meaning_uri_:
+      self.has_meaning_uri_ = 0
+      self.meaning_uri_ = ""
 
   def has_meaning_uri(self): return self.has_meaning_uri_
 
@@ -908,8 +927,9 @@
     self.name_ = x
 
   def clear_name(self):
-    self.has_name_ = 0
-    self.name_ = ""
+    if self.has_name_:
+      self.has_name_ = 0
+      self.name_ = ""
 
   def has_name(self): return self.has_name_
 
@@ -928,8 +948,9 @@
     self.multiple_ = x
 
   def clear_multiple(self):
-    self.has_multiple_ = 0
-    self.multiple_ = 0
+    if self.has_multiple_:
+      self.has_multiple_ = 0
+      self.multiple_ = 0
 
   def has_multiple(self): return self.has_multiple_
 
@@ -1087,8 +1108,9 @@
     self.type_ = x
 
   def clear_type(self):
-    self.has_type_ = 0
-    self.type_ = ""
+    if self.has_type_:
+      self.has_type_ = 0
+      self.type_ = ""
 
   def has_type(self): return self.has_type_
 
@@ -1099,8 +1121,9 @@
     self.id_ = x
 
   def clear_id(self):
-    self.has_id_ = 0
-    self.id_ = 0
+    if self.has_id_:
+      self.has_id_ = 0
+      self.id_ = 0
 
   def has_id(self): return self.has_id_
 
@@ -1111,8 +1134,9 @@
     self.name_ = x
 
   def clear_name(self):
-    self.has_name_ = 0
-    self.name_ = ""
+    if self.has_name_:
+      self.has_name_ = 0
+      self.name_ = ""
 
   def has_name(self): return self.has_name_
 
@@ -1307,8 +1331,9 @@
     self.app_ = x
 
   def clear_app(self):
-    self.has_app_ = 0
-    self.app_ = ""
+    if self.has_app_:
+      self.has_app_ = 0
+      self.app_ = ""
 
   def has_app(self): return self.has_app_
 
@@ -1464,8 +1489,9 @@
     self.email_ = x
 
   def clear_email(self):
-    self.has_email_ = 0
-    self.email_ = ""
+    if self.has_email_:
+      self.has_email_ = 0
+      self.email_ = ""
 
   def has_email(self): return self.has_email_
 
@@ -1476,8 +1502,9 @@
     self.auth_domain_ = x
 
   def clear_auth_domain(self):
-    self.has_auth_domain_ = 0
-    self.auth_domain_ = ""
+    if self.has_auth_domain_:
+      self.has_auth_domain_ = 0
+      self.auth_domain_ = ""
 
   def has_auth_domain(self): return self.has_auth_domain_
 
@@ -1488,8 +1515,9 @@
     self.nickname_ = x
 
   def clear_nickname(self):
-    self.has_nickname_ = 0
-    self.nickname_ = ""
+    if self.has_nickname_:
+      self.has_nickname_ = 0
+      self.nickname_ = ""
 
   def has_nickname(self): return self.has_nickname_
 
@@ -1500,8 +1528,9 @@
     self.gaiaid_ = x
 
   def clear_gaiaid(self):
-    self.has_gaiaid_ = 0
-    self.gaiaid_ = 0
+    if self.has_gaiaid_:
+      self.has_gaiaid_ = 0
+      self.gaiaid_ = 0
 
   def has_gaiaid(self): return self.has_gaiaid_
 
@@ -1680,8 +1709,9 @@
   def mutable_owner(self): self.has_owner_ = 1; return self.owner()
 
   def clear_owner(self):
-    self.has_owner_ = 0;
-    if self.owner_ is not None: self.owner_.Clear()
+    if self.has_owner_:
+      self.has_owner_ = 0;
+      if self.owner_ is not None: self.owner_.Clear()
 
   def has_owner(self): return self.has_owner_
 
@@ -1692,8 +1722,9 @@
     self.kind_ = x
 
   def clear_kind(self):
-    self.has_kind_ = 0
-    self.kind_ = 0
+    if self.has_kind_:
+      self.has_kind_ = 0
+      self.kind_ = 0
 
   def has_kind(self): return self.has_kind_
 
@@ -1704,8 +1735,9 @@
     self.kind_uri_ = x
 
   def clear_kind_uri(self):
-    self.has_kind_uri_ = 0
-    self.kind_uri_ = ""
+    if self.has_kind_uri_:
+      self.has_kind_uri_ = 0
+      self.kind_uri_ = ""
 
   def has_kind_uri(self): return self.has_kind_uri_
 
@@ -2000,8 +2032,9 @@
     self.index_id_ = x
 
   def clear_index_id(self):
-    self.has_index_id_ = 0
-    self.index_id_ = 0
+    if self.has_index_id_:
+      self.has_index_id_ = 0
+      self.index_id_ = 0
 
   def has_index_id(self): return self.has_index_id_
 
@@ -2132,8 +2165,9 @@
     self.name_ = x
 
   def clear_name(self):
-    self.has_name_ = 0
-    self.name_ = ""
+    if self.has_name_:
+      self.has_name_ = 0
+      self.name_ = ""
 
   def has_name(self): return self.has_name_
 
@@ -2144,8 +2178,9 @@
     self.direction_ = x
 
   def clear_direction(self):
-    self.has_direction_ = 0
-    self.direction_ = 1
+    if self.has_direction_:
+      self.has_direction_ = 0
+      self.direction_ = 1
 
   def has_direction(self): return self.has_direction_
 
@@ -2225,8 +2260,9 @@
     self.entity_type_ = x
 
   def clear_entity_type(self):
-    self.has_entity_type_ = 0
-    self.entity_type_ = ""
+    if self.has_entity_type_:
+      self.has_entity_type_ = 0
+      self.entity_type_ = ""
 
   def has_entity_type(self): return self.has_entity_type_
 
@@ -2237,8 +2273,9 @@
     self.ancestor_ = x
 
   def clear_ancestor(self):
-    self.has_ancestor_ = 0
-    self.ancestor_ = 0
+    if self.has_ancestor_:
+      self.has_ancestor_ = 0
+      self.ancestor_ = 0
 
   def has_ancestor(self): return self.has_ancestor_
 
@@ -2409,8 +2446,9 @@
     self.app_id_ = x
 
   def clear_app_id(self):
-    self.has_app_id_ = 0
-    self.app_id_ = ""
+    if self.has_app_id_:
+      self.has_app_id_ = 0
+      self.app_id_ = ""
 
   def has_app_id(self): return self.has_app_id_
 
@@ -2421,8 +2459,9 @@
     self.id_ = x
 
   def clear_id(self):
-    self.has_id_ = 0
-    self.id_ = 0
+    if self.has_id_:
+      self.has_id_ = 0
+      self.id_ = 0
 
   def has_id(self): return self.has_id_
 
@@ -2441,8 +2480,9 @@
     self.state_ = x
 
   def clear_state(self):
-    self.has_state_ = 0
-    self.state_ = 0
+    if self.has_state_:
+      self.has_state_ = 0
+      self.state_ = 0
 
   def has_state(self): return self.has_state_
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/__init__.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Specify the modules for which a stub exists."""
+
+
+__all__ = ['ftplib', 'httplib', 'py_imp', 'neo_cgi', 'select', 'socket',
+           'subprocess', 'tempfile']
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/ftplib.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/httplib.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,385 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Copyright 2008 Python Software Foundation, Ian Bicking, and Google."""
+
+import mimetools
+import StringIO
+import sys
+
+
+CONTINUE = 100
+SWITCHING_PROTOCOLS = 101
+PROCESSING  = 102
+OK = 200
+CREATED = 201
+ACCEPTED = 202
+NON_AUTHORITATIVE_INFORMATION = 203
+NO_CONTENT = 204
+RESET_CONTENT = 205
+PARTIAL_CONTENT = 206
+MULTI_STATUS = 207
+IM_USED = 226
+MULTIPLE_CHOICES = 300
+MOVED_PERMANENTLY = 301
+FOUND = 302
+SEE_OTHER = 303
+NOT_MODIFIED = 304
+USE_PROXY = 305
+TEMPORARY_REDIRECT = 307
+BAD_REQUEST = 400
+UNAUTHORIZED = 401
+PAYMENT_REQUIRED = 402
+FORBIDDEN = 403
+NOT_FOUND = 404
+METHOD_NOT_ALLOWED = 405
+NOT_ACCEPTABLE = 406
+PROXY_AUTHENTICATION_REQUIRED = 407
+REQUEST_TIMEOUT = 408
+CONFLICT = 409
+GONE = 410
+LENGTH_REQUIRED = 411
+PRECONDITION_FAILED = 412
+REQUEST_ENTITY_TOO_LARGE = 413
+REQUEST_URI_TOO_LONG = 414
+UNSUPPORTED_MEDIA_TYPE = 415
+REQUESTED_RANGE_NOT_SATISFIABLE = 416
+EXPECTATION_FAILED = 417
+UNPROCESSABLE_ENTITY = 422
+LOCKED = 423
+FAILED_DEPENDENCY = 424
+UPGRADE_REQUIRED = 426
+INTERNAL_SERVER_ERROR = 500
+NOT_IMPLEMENTED = 501
+BAD_GATEWAY = 502
+SERVICE_UNAVAILABLE = 503
+GATEWAY_TIMEOUT = 504
+HTTP_VERSION_NOT_SUPPORTED = 505
+INSUFFICIENT_STORAGE = 507
+NOT_EXTENDED = 510
+
+responses = {
+  100: 'Continue',
+  101: 'Switching Protocols',
+
+  200: 'OK',
+  201: 'Created',
+  202: 'Accepted',
+  203: 'Non-Authoritative Information',
+  204: 'No Content',
+  205: 'Reset Content',
+  206: 'Partial Content',
+
+  300: 'Multiple Choices',
+  301: 'Moved Permanently',
+  302: 'Found',
+  303: 'See Other',
+  304: 'Not Modified',
+  305: 'Use Proxy',
+  306: '(Unused)',
+  307: 'Temporary Redirect',
+
+  400: 'Bad Request',
+  401: 'Unauthorized',
+  402: 'Payment Required',
+  403: 'Forbidden',
+  404: 'Not Found',
+  405: 'Method Not Allowed',
+  406: 'Not Acceptable',
+  407: 'Proxy Authentication Required',
+  408: 'Request Timeout',
+  409: 'Conflict',
+  410: 'Gone',
+  411: 'Length Required',
+  412: 'Precondition Failed',
+  413: 'Request Entity Too Large',
+  414: 'Request-URI Too Long',
+  415: 'Unsupported Media Type',
+  416: 'Requested Range Not Satisfiable',
+  417: 'Expectation Failed',
+
+  500: 'Internal Server Error',
+  501: 'Not Implemented',
+  502: 'Bad Gateway',
+  503: 'Service Unavailable',
+  504: 'Gateway Timeout',
+  505: 'HTTP Version Not Supported',
+}
+
+HTTP_PORT = 80
+HTTPS_PORT = 443
+
+
+
+
+
+class HTTPConnection:
+
+
+  protocol = 'http'
+  default_port = HTTP_PORT
+  _allow_truncated = True
+  _follow_redirects = False
+
+  def __init__(self, host, port=None, strict=False, timeout=None):
+    from google.appengine.api import urlfetch
+    self._fetch = urlfetch.fetch
+    self._method_map = {
+      'GET': urlfetch.GET,
+      'POST': urlfetch.POST,
+      'HEAD': urlfetch.HEAD,
+      'PUT': urlfetch.PUT,
+      'DELETE': urlfetch.DELETE,
+    }
+    self.host = host
+    self.port = port
+    self._method = self._url = None
+    self._body = ''
+    self.headers = []
+
+  def connect(self):
+    pass
+
+  def request(self, method, url, body=None, headers=None):
+    self._method = method
+    self._url = url
+    try:
+      self._body = body.read()
+    except AttributeError:
+      self._body = body
+    if headers is None:
+      headers = []
+    elif hasattr(headers, 'items'):
+      headers = headers.items()
+    self.headers = headers
+
+  def putrequest(self, request, selector, skip_host=False, skip_accept_encoding=False):
+    self._method = request
+    self._url = selector
+
+  def putheader(self, header, *lines):
+    line = '\r\n\t'.join(lines)
+    self.headers.append((header, line))
+
+  def endheaders(self):
+    pass
+
+  def set_debuglevel(self, level=None):
+    pass
+
+  def send(self, data):
+    self._body += data
+
+  def getresponse(self):
+    if self.port and self.port != self.default_port:
+        host = '%s:%s' % (self.host, self.port)
+    else:
+        host = self.host
+    url = '%s://%s%s' % (self.protocol, host, self._url)
+    headers = dict(self.headers)
+
+    try:
+      method = self._method_map[self._method.upper()]
+    except KeyError:
+      raise ValueError("%r is an unrecognized HTTP method" % self._method)
+
+    response = self._fetch(url, self._body, method, headers,
+                           self._allow_truncated, self._follow_redirects)
+    return HTTPResponse(response)
+
+  def close(self):
+      pass
+
+
+class HTTPSConnection(HTTPConnection):
+
+    protocol = 'https'
+    default_port = HTTPS_PORT
+
+    def __init__(self, host, port=None, key_file=None, cert_file=None,
+                 strict=False, timeout=None):
+        if key_file is not None or cert_file is not None:
+            raise NotImplementedError(
+                "key_file and cert_file arguments are not implemented")
+        HTTPConnection.__init__(self, host, port=port, strict=strict,
+                                timeout=timeout)
+
+
+class HTTPResponse(object):
+
+  def __init__(self, fetch_response):
+    self._fetch_response = fetch_response
+    self.fp = StringIO.StringIO(fetch_response.content)
+
+  def __getattr__(self, attr):
+    return getattr(self.fp, attr)
+
+  def getheader(self, name, default=None):
+    return self._fetch_response.headers.get(name, default)
+
+  def getheaders(self):
+    return self._fetch_response.headers.items()
+
+  @property
+  def msg(self):
+    msg = mimetools.Message(StringIO.StringIO(''))
+    for name, value in self._fetch_response.headers.items():
+      msg[name] = value
+    return msg
+
+  version = 11
+
+  @property
+  def status(self):
+    return self._fetch_response.status_code
+
+  @property
+  def reason(self):
+    return responses.get(self._fetch_response.status_code, 'Unknown')
+
+
+class HTTP:
+  "Compatibility class with httplib.py from 1.5."
+
+  _http_vsn = 11
+  _http_vsn_str = 'HTTP/1.1'
+
+  debuglevel = 0
+
+  _connection_class = HTTPConnection
+
+  def __init__(self, host='', port=None, strict=None):
+    "Provide a default host, since the superclass requires one."
+
+    if port == 0:
+      port = None
+
+    self._setup(self._connection_class(host, port, strict))
+
+  def _setup(self, conn):
+    self._conn = conn
+
+    self.send = conn.send
+    self.putrequest = conn.putrequest
+    self.endheaders = conn.endheaders
+    self.set_debuglevel = conn.set_debuglevel
+
+    conn._http_vsn = self._http_vsn
+    conn._http_vsn_str = self._http_vsn_str
+
+    self.file = None
+
+  def connect(self, host=None, port=None):
+    "Accept arguments to set the host/port, since the superclass doesn't."
+    self.__init__(host, port)
+
+  def getfile(self):
+    "Provide a getfile, since the superclass' does not use this concept."
+    return self.file
+
+  def putheader(self, header, *values):
+    "The superclass allows only one value argument."
+    self._conn.putheader(header, '\r\n\t'.join(values))
+
+  def getreply(self):
+    """Compat definition since superclass does not define it.
+
+    Returns a tuple consisting of:
+    - server status code (e.g. '200' if all goes well)
+    - server "reason" corresponding to status code
+    - any RFC822 headers in the response from the server
+    """
+    response = self._conn.getresponse()
+
+    self.headers = response.msg
+    self.file = response.fp
+    return response.status, response.reason, response.msg
+
+  def close(self):
+    self._conn.close()
+
+    self.file = None
+
+
+class HTTPS(HTTP):
+  """Compatibility with 1.5 httplib interface
+
+  Python 1.5.2 did not have an HTTPS class, but it defined an
+  interface for sending http requests that is also useful for
+  https.
+  """
+
+  _connection_class = HTTPSConnection
+
+  def __init__(self, host='', port=None, key_file=None, cert_file=None,
+               strict=None):
+    if key_file is not None or cert_file is not None:
+      raise NotImplementedError(
+          "key_file and cert_file arguments are not implemented")
+
+
+    if port == 0:
+      port = None
+    self._setup(self._connection_class(host, port, key_file,
+                                       cert_file, strict))
+
+    self.key_file = key_file
+    self.cert_file = cert_file
+
+
+class HTTPException(Exception):
+  pass
+
+class NotConnected(HTTPException):
+  pass
+
+class InvalidURL(HTTPException):
+  pass
+
+class UnknownProtocol(HTTPException):
+  def __init__(self, version):
+    self.version = version
+    HTTPException.__init__(self, version)
+
+class UnknownTransferEncoding(HTTPException):
+  pass
+
+class UnimplementedFileMode(HTTPException):
+  pass
+
+class IncompleteRead(HTTPException):
+  def __init__(self, partial):
+    self.partial = partial
+    HTTPException.__init__(self, partial)
+
+class ImproperConnectionState(HTTPException):
+  pass
+
+class CannotSendRequest(ImproperConnectionState):
+  pass
+
+class CannotSendHeader(ImproperConnectionState):
+  pass
+
+class ResponseNotReady(ImproperConnectionState):
+  pass
+
+class BadStatusLine(HTTPException):
+  def __init__(self, line):
+    self.line = line
+    HTTPException.__init__(self, line)
+
+error = HTTPException
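
A minimal usage sketch of the httplib replacement above (illustrative only, not part of the changeset). It assumes the HTTPConnection.request()/getresponse() methods defined earlier in this file mirror the standard library, as the HTTP/HTTPS wrappers above expect:

import httplib  # on App Engine this resolves to google.appengine.dist.httplib

conn = httplib.HTTPConnection('www.example.com')
conn.request('GET', '/')
resp = conn.getresponse()             # an HTTPResponse wrapping a urlfetch result
print resp.status, resp.reason        # e.g. 200 OK
print resp.getheader('Content-Type')  # headers come from the fetch response
conn.close()
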
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/neo_cgi.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/py_imp.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Stub replacement for Python's imp module."""
+
+
+import os
+import sys
+
+
+PY_SOURCE, PY_COMPILED, C_EXTENSION = 1, 2, 3
+PKG_DIRECTORY, C_BUILTIN, PY_FROZEN = 5, 6, 7
+
+
+def get_magic():
+  return '\0\0\0\0'
+
+
+def get_suffixes():
+  return [('.py', 'U', PY_SOURCE)]
+
+
+def new_module(name):
+  return type(sys.modules[__name__])(name)
+
+
+def lock_held():
+  """Return False since threading is not supported."""
+  return False
+
+def acquire_lock():
+  """Acquiring the lock is a no-op since no threading is supported."""
+  pass
+
+def release_lock():
+  """Releasing the lock is a no-op since no threading is supported."""
+  pass
+
+
+def is_builtin(name):
+  return name in sys.builtin_module_names
+
+
+def is_frozen(name):
+  return False
+
+
+class NullImporter(object):
+
+  def __init__(self, path_string):
+    if not path_string:
+      raise ImportError("empty pathname")
+    elif os.path.isdir(path_string):
+      raise ImportError("existing directory")
+
+  def find_module(self, fullname):
+    return None
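
An illustrative sketch of what the imp stub above supports (not part of the changeset; it assumes the sandbox maps the standard 'imp' module name to this replacement):

import imp  # assumed to resolve to this stub inside the App Engine sandbox

scratch = imp.new_module('scratch')  # a plain, empty module object
scratch.answer = 42

imp.acquire_lock()                   # both lock calls are no-ops; no threading
imp.release_lock()

print imp.get_suffixes()             # [('.py', 'U', 1)], i.e. PY_SOURCE only
print imp.is_builtin('sys')          # True when 'sys' is a builtin module
print imp.lock_held()                # always False
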
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/select.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/socket.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+AF_INET = None
+SOCK_STREAM = None
+SOCK_DGRAM = None
+
+_GLOBAL_DEFAULT_TIMEOUT = object()
+
+
+class error(OSError):
+  pass
+
+class herror(error):
+  pass
+
+class gaierror(error):
+  pass
+
+class timeout(error):
+  pass
+
+
+def _fileobject(fp, mode='rb', bufsize=-1, close=False):
+  """Assuming that the argument is a StringIO or file instance."""
+  if not hasattr(fp, 'fileno'):
+    fp.fileno = lambda: None
+  return fp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/subprocess.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/tempfile.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Temporary files.
+
+This module is a replacement for the stock tempfile module in Python,
+and provides only in-memory temporary files as implemented by
+cStringIO. The only functionality provided is the TemporaryFile
+function.
+"""
+
+try:
+  from cStringIO import StringIO
+except ImportError:
+  from StringIO import StringIO
+
+__all__ = [
+  "TemporaryFile",
+
+  "NamedTemporaryFile", "mkstemp", "mkdtemp", "mktemp",
+  "TMP_MAX", "gettempprefix", "tempdir", "gettempdir",
+]
+
+TMP_MAX = 10000
+
+template = "tmp"
+
+tempdir = None
+
+def TemporaryFile(mode='w+b', bufsize=-1, suffix="",
+                  prefix=template, dir=None):
+  """Create and return a temporary file.
+  Arguments:
+  'prefix', 'suffix', 'dir', 'mode', 'bufsize' are all ignored.
+
+  Returns an object with a file-like interface.  The file is in memory
+  only, and does not exist on disk.
+  """
+
+  return StringIO()
+
+def PlaceHolder(*args, **kwargs):
+  raise NotImplementedError("Only tempfile.TemporaryFile is available for use")
+
+NamedTemporaryFile = PlaceHolder
+mkstemp = PlaceHolder
+mkdtemp = PlaceHolder
+mktemp = PlaceHolder
+gettempprefix = PlaceHolder
+tempdir = PlaceHolder
+gettempdir = PlaceHolder
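
A short sketch of the only supported entry point in this tempfile replacement (illustrative only, not part of the changeset):

import tempfile  # assumed to resolve to this replacement on App Engine

tmp = tempfile.TemporaryFile()       # really an in-memory (c)StringIO object
tmp.write('scratch data')
tmp.seek(0)
print tmp.read()                     # 'scratch data'; nothing touches disk

try:
  tempfile.mkstemp()                 # every other helper is a PlaceHolder
except NotImplementedError, err:
  print err                          # 'Only tempfile.TemporaryFile is available for use'
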
--- a/thirdparty/google_appengine/google/appengine/ext/bulkload/__init__.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/bulkload/__init__.py	Thu Feb 12 12:30:36 2009 +0000
@@ -105,17 +105,12 @@
 import csv
 import httplib
 import os
-import sys
 import traceback
-import types
-import struct
-import zlib
 
 import google
 import wsgiref.handlers
 
 from google.appengine.api import datastore
-from google.appengine.api import datastore_types
 from google.appengine.ext import webapp
 from google.appengine.ext.bulkload import constants
 
@@ -299,13 +294,8 @@
     """ Handle a POST. Reads CSV data, converts to entities, and stores them.
     """
     self.response.headers['Content-Type'] = 'text/plain'
-    version = self.request.headers.get('GAE-Uploader-Version', '0')
-    if version == '1':
-      kind = self.request.headers.get('GAE-Uploader-Kind')
-      response, output = self.LoadV1(kind, self.request.body)
-    else:
-      response, output = self.Load(self.request.get(constants.KIND_PARAM),
-                                   self.request.get(constants.CSV_PARAM))
+    response, output = self.Load(self.request.get(constants.KIND_PARAM),
+                                 self.request.get(constants.CSV_PARAM))
     self.response.set_status(response)
     self.response.out.write(output)
 
@@ -422,69 +412,6 @@
 
     return self.LoadEntities(self.IterRows(reader), loader)
 
-  def IterRowsV1(self, data):
-    """Yields a tuple of columns for each row in the uploaded data.
-
-    Args:
-      data: a string containing the unzipped v1 format data to load.
-
-    """
-    column_count, = struct.unpack_from('!i', data)
-    offset = 4
-
-    lengths_format = '!%di' % (column_count,)
-
-    while offset < len(data):
-      id_num = struct.unpack_from('!i', data, offset=offset)
-      offset += 4
-
-      value_lengths = struct.unpack_from(lengths_format, data, offset=offset)
-      offset += 4 * column_count
-
-      columns = struct.unpack_from(''.join('%ds' % length
-                                           for length in value_lengths), data,
-                                   offset=offset)
-      offset += sum(value_lengths)
-
-      yield (id_num, columns)
-
-
-  def LoadV1(self, kind, data):
-    """Parses version-1 format data, converts to entities, and stores them.
-
-    On error, fails fast. Returns a "bad request" HTTP response code and
-    includes the traceback in the output.
-
-    Args:
-      kind: a string containing the entity kind that this loader handles
-      data: a string containing the (v1 format) data to load
-
-    Returns:
-      tuple (response code, output) where:
-        response code: integer HTTP response code to return
-        output: string containing the HTTP response body
-    """
-    Validate(kind, basestring)
-    Validate(data, basestring)
-    output = []
-
-    try:
-      loader = Loader.RegisteredLoaders()[kind]
-    except KeyError:
-      output.append('Error: no Loader defined for kind %s.' % kind)
-      return (httplib.BAD_REQUEST, ''.join(output))
-
-    try:
-      data = zlib.decompress(data)
-    except:
-      stacktrace = traceback.format_exc()
-      output.append('Error: Could not decompress data\n%s' % stacktrace)
-      return (httplib.BAD_REQUEST, ''.join(output))
-
-    key_format = 'i%010d'
-    return self.LoadEntities(self.IterRowsV1(data),
-                             loader,
-                             key_format=key_format)
 
 def main(*loaders):
   """Starts bulk upload.
--- a/thirdparty/google_appengine/google/appengine/ext/db/__init__.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/db/__init__.py	Thu Feb 12 12:30:36 2009 +0000
@@ -77,10 +77,13 @@
 
 
 
+import copy
 import datetime
 import logging
+import re
 import time
 import urlparse
+import warnings
 
 from google.appengine.api import datastore
 from google.appengine.api import datastore_errors
@@ -187,6 +190,11 @@
 _ALLOWED_EXPANDO_PROPERTY_TYPES = set(_ALLOWED_PROPERTY_TYPES)
 _ALLOWED_EXPANDO_PROPERTY_TYPES.update((list, tuple, type(None)))
 
+_OPERATORS = ['<', '<=', '>', '>=', '=', '==', '!=', 'in']
+_FILTER_REGEX = re.compile(
+    '^\s*([^\s]+)(\s+(%s)\s*)?$' % '|'.join(_OPERATORS),
+    re.IGNORECASE | re.UNICODE)
+
 
 def class_for_kind(kind):
   """Return base-class responsible for implementing kind.
@@ -527,12 +535,12 @@
                **kwds):
     """Creates a new instance of this model.
 
-    To create a new entity, you instantiate a model and then call save(),
+    To create a new entity, you instantiate a model and then call put(),
     which saves the entity to the datastore:
 
        person = Person()
        person.name = 'Bret'
-       person.save()
+       person.put()
 
     You can initialize properties in the model in the constructor with keyword
     arguments:
@@ -595,7 +603,7 @@
 
     This property is only available if this entity is already stored in the
     datastore, so it is available if this entity was fetched returned from a
-    query, or after save() is called the first time for new entities.
+    query, or after put() is called the first time for new entities.
 
     Returns:
       Datastore key of persisted entity.
@@ -606,7 +614,11 @@
     if self.is_saved():
       return self._entity.key()
     elif self._key_name:
-      parent = self._parent and self._parent.key()
+      if self._parent_key:
+        parent = self._parent_key
+      elif self._parent:
+        parent = self._parent.key()
+      else:
+        parent = None
       return Key.from_path(self.kind(), self._key_name, parent=parent)
     else:
       raise NotSavedError()
@@ -1143,7 +1155,7 @@
 
     1 - Because it is not possible for the datastore to know what kind of
         property to store on an undefined expando value, setting a property to
-        None is the same as deleting it form the expando.
+        None is the same as deleting it from the expando.
 
     2 - Persistent variables on Expando must not begin with '_'.  These
         variables considered to be 'protected' in Python, and are used
@@ -1524,16 +1536,71 @@
       model_class: Model class to build query for.
     """
     super(Query, self).__init__(model_class)
-    self.__query_set = {}
+    self.__query_sets = [{}]
     self.__orderings = []
     self.__ancestor = None
 
-  def _get_query(self, _query_class=datastore.Query):
-    query = _query_class(self._model_class.kind(), self.__query_set)
-    if self.__ancestor is not None:
-      query.Ancestor(self.__ancestor)
-    query.Order(*self.__orderings)
-    return query
+  def _get_query(self,
+                 _query_class=datastore.Query,
+                 _multi_query_class=datastore.MultiQuery):
+    queries = []
+    for query_set in self.__query_sets:
+      query = _query_class(self._model_class.kind(), query_set)
+      if self.__ancestor is not None:
+        query.Ancestor(self.__ancestor)
+      queries.append(query)
+
+    if (_query_class != datastore.Query and
+        _multi_query_class == datastore.MultiQuery):
+      warnings.warn(
+          'Custom _query_class specified without corresponding custom'
+          ' _multi_query_class. Things will break if you use queries with'
+          ' the "IN" or "!=" operators.', RuntimeWarning)
+      if len(queries) > 1:
+        raise datastore_errors.BadArgumentError(
+            'Query requires multiple subqueries to satisfy. If _query_class'
+            ' is overridden, _multi_query_class must also be overridden.')
+    elif (_query_class == datastore.Query and
+          _multi_query_class != datastore.MultiQuery):
+      raise BadArgumentError('_query_class must also be overridden if'
+                             ' _multi_query_class is overridden.')
+
+    if len(queries) == 1:
+      queries[0].Order(*self.__orderings)
+      return queries[0]
+    else:
+      return _multi_query_class(queries, self.__orderings)
+
+  def __filter_disjunction(self, operations, values):
+    """Add a disjunction of several filters and several values to the query.
+
+    This is implemented by duplicating queries and combining the
+    results later.
+
+    Args:
+      operations: a string or list of strings. Each string contains a
+        property name and an operator to filter by. The operators
+        themselves must not require multiple queries to evaluate
+        (currently, this means that 'in' and '!=' are invalid).
+
+      values: a value or list of filter values, normalized by
+        _normalize_query_parameter.
+    """
+    if not isinstance(operations, (list, tuple)):
+      operations = [operations]
+    if not isinstance(values, (list, tuple)):
+      values = [values]
+
+    new_query_sets = []
+    for operation in operations:
+      if operation.lower().endswith('in') or operation.endswith('!='):
+        raise BadQueryError('Cannot use "in" or "!=" in a disjunction.')
+      for query_set in self.__query_sets:
+        for value in values:
+          new_query_set = copy.copy(query_set)
+          datastore._AddOrAppend(new_query_set, operation, value)
+          new_query_sets.append(new_query_set)
+    self.__query_sets = new_query_sets
 
   def filter(self, property_operator, value):
     """Add filter to query.
@@ -1545,11 +1612,29 @@
     Returns:
       Self to support method chaining.
     """
-    if isinstance(value, (list, tuple)):
-      raise BadValueError('Filtering on lists is not supported')
-
-    value = _normalize_query_parameter(value)
-    datastore._AddOrAppend(self.__query_set, property_operator, value)
+    match = _FILTER_REGEX.match(property_operator)
+    prop = match.group(1)
+    if match.group(3) is not None:
+      operator = match.group(3)
+    else:
+      operator = '=='
+
+    if operator.lower() == 'in':
+      if not isinstance(value, (list, tuple)):
+        raise BadValueError('Argument to the "in" operator must be a list')
+      values = [_normalize_query_parameter(v) for v in value]
+      self.__filter_disjunction(prop + ' =', values)
+    else:
+      if isinstance(value, (list, tuple)):
+        raise BadValueError('Filtering on lists is not supported')
+      if operator == '!=':
+        self.__filter_disjunction([prop + ' <', prop + ' >'],
+                                  _normalize_query_parameter(value))
+      else:
+        value = _normalize_query_parameter(value)
+        for query_set in self.__query_sets:
+          datastore._AddOrAppend(query_set, property_operator, value)
+
     return self
 
   def order(self, property):
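
An illustrative sketch of what the rewritten filter() above enables (not part of the changeset; the Person model is hypothetical):

from google.appengine.ext import db

class Person(db.Model):
  name = db.StringProperty()
  age = db.IntegerProperty()

q = Person.all()
q.filter('name IN', ['Alice', 'Bob'])  # fans out into one query set per value
q.filter('age !=', 30)                 # fans out into 'age <' and 'age >' sets
people = q.fetch(20)                   # executed as a datastore.MultiQuery
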
@@ -2359,8 +2444,11 @@
     Returns:
       validated list appropriate to save in the datastore.
     """
-    return self.validate_list_contents(
+    value = self.validate_list_contents(
         super(ListProperty, self).get_value_for_datastore(model_instance))
+    if self.validator:
+      self.validator(value)
+    return value
 
 
 class StringListProperty(ListProperty):
@@ -2622,5 +2710,7 @@
 
 
 run_in_transaction = datastore.RunInTransaction
+run_in_transaction_custom_retries = datastore.RunInTransactionCustomRetries
 
 RunInTransaction = run_in_transaction
+RunInTransactionCustomRetries = run_in_transaction_custom_retries
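
A minimal sketch of the new alias (illustrative only, not part of the changeset), assuming the same (retries, function, *args) signature as datastore.RunInTransactionCustomRetries; the Counter model is hypothetical:

from google.appengine.ext import db

class Counter(db.Model):
  count = db.IntegerProperty(default=0)

def bump(counter_key):
  counter = db.get(counter_key)
  counter.count += 1
  counter.put()

key = Counter(key_name='hits').put()
db.run_in_transaction_custom_retries(5, bump, key)  # retry up to 5 times
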
--- a/thirdparty/google_appengine/google/appengine/ext/gql/__init__.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/gql/__init__.py	Thu Feb 12 12:30:36 2009 +0000
@@ -28,7 +28,6 @@
 
 import calendar
 import datetime
-import heapq
 import logging
 import re
 import time
@@ -38,6 +37,7 @@
 from google.appengine.api import datastore_types
 from google.appengine.api import users
 
+MultiQuery = datastore.MultiQuery
 
 LOG_LEVEL = logging.DEBUG - 1
 
@@ -161,7 +161,7 @@
     \S+
     """, re.VERBOSE | re.IGNORECASE)
 
-  MAX_ALLOWABLE_QUERIES = 30
+  MAX_ALLOWABLE_QUERIES = datastore.MAX_ALLOWABLE_QUERIES
 
   __ANCESTOR = -1
 
@@ -347,37 +347,113 @@
     else:
       return users.User(email=values[0], _auth_domain=self.__auth_domain)
 
+  def __EncodeIfNeeded(self, value):
+    """Simple helper function to create a str from a possibly unicode string.
+
+    Args:
+      value: input string (an instance of str or unicode).
+    """
+    if isinstance(value, unicode):
+      return value.encode('utf8')
+    else:
+      return value
+
   def __CastDate(self, values):
-    """Cast date values to DATETIME() class using ISO string or tuple inputs."""
+    """Cast DATE values (year/month/day) from input (to datetime.datetime).
+
+    Casts DATE input values formulated as ISO string or time tuple inputs.
+
+    Args:
+      values: either a single string with ISO time representation or 3
+              integer valued date tuple (year, month, day).
+
+    Returns:
+      datetime.datetime value parsed from the input values.
+    """
+
+    if len(values) == 1:
+      value = self.__EncodeIfNeeded(values[0])
+      if isinstance(value, str):
+        try:
+          time_tuple = time.strptime(value, '%Y-%m-%d')[0:6]
+        except ValueError, err:
+          self.__CastError('DATE', values, err)
+      else:
+        self.__CastError('DATE', values, 'Single input value not a string')
+    elif len(values) == 3:
+      time_tuple = (values[0], values[1], values[2], 0, 0, 0)
+    else:
+      self.__CastError('DATE', values,
+                       'function takes 1 string or 3 integer values')
+
     try:
-      if len(values) == 1 and isinstance(values[0], str):
-        time_tuple = time.strptime(values[0], '%Y-%m-%d')
-        return datetime.datetime(*time_tuple[0:6])
-      else:
-        return datetime.datetime(values[0], values[1], values[2], 0, 0, 0)
+      return datetime.datetime(*time_tuple)
     except ValueError, err:
       self.__CastError('DATE', values, err)
 
   def __CastTime(self, values):
-    """Cast time values to DATETIME() class using ISO string or tuple inputs."""
+    """Cast TIME values (hour/min/sec) from input (to datetime.datetime).
+
+    Casts TIME input values formulated as ISO string or time tuple inputs.
+
+    Args:
+      values: either a single string with ISO time representation or 1-4
+              integer valued time tuple (hour), (hour, minute),
+              (hour, minute, second), (hour, minute, second, microsec).
+
+    Returns:
+      datetime.datetime value parsed from the input values.
+    """
+    if len(values) == 1:
+      value = self.__EncodeIfNeeded(values[0])
+      if isinstance(value, str):
+        try:
+          time_tuple = time.strptime(value, '%H:%M:%S')
+        except ValueError, err:
+          self.__CastError('TIME', values, err)
+        time_tuple = (1970, 1, 1) + time_tuple[3:]
+        time_tuple = time_tuple[0:6]
+      elif isinstance(value, int):
+        time_tuple = (1970, 1, 1, value)
+      else:
+        self.__CastError('TIME', values,
+                         'Single input value not a string or integer hour')
+    elif len(values) <= 4:
+      time_tuple = (1970, 1, 1) + tuple(values)
+    else:
+      self.__CastError('TIME', values,
+                       'function takes 1 string or 1 to 4 integer values')
+
     try:
-      if len(values) == 1 and isinstance(values[0], str):
-        time_tuple = time.strptime(values[0], '%H:%M:%S')
-        time_tuple = (1970, 1, 1) + time_tuple[3:]
-        return datetime.datetime(*time_tuple[0:6])
-      else:
-        return datetime.datetime(1970, 1, 1, *values)
+      return datetime.datetime(*time_tuple)
     except ValueError, err:
       self.__CastError('TIME', values, err)
 
   def __CastDatetime(self, values):
-    """Cast values to DATETIME() class using ISO string or tuple inputs."""
+    """Cast DATETIME values (string or tuple) from input (to datetime.datetime).
+
+    Casts DATETIME input values formulated as ISO string or datetime tuple
+    inputs.
+
+    Args:
+      values: either a single string with ISO representation or 3-7
+              integer valued time tuple (year, month, day, ...).
+
+    Returns:
+      datetime.datetime value parsed from the input values.
+    """
+    if len(values) == 1:
+      value = self.__EncodeIfNeeded(values[0])
+      if isinstance(value, str):
+        try:
+          time_tuple = time.strptime(str(value), '%Y-%m-%d %H:%M:%S')[0:6]
+        except ValueError, err:
+          self.__CastError('DATETIME', values, err)
+      else:
+        self.__CastError('DATETIME', values, 'Single input value not a string')
+    else:
+      time_tuple = values
+
     try:
-      if len(values) == 1 and isinstance(values[0], str):
-        time_tuple = time.strptime(values[0], '%Y-%m-%d %H:%M:%S')
-        return datetime.datetime(*time_tuple[0:6])
-      else:
-        return datetime.datetime(*values)
+      return datetime.datetime(*time_tuple)
     except ValueError, err:
       self.__CastError('DATETIME', values, err)
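
An illustrative sketch of the literal forms the cast helpers above accept (not part of the changeset; the Person kind is hypothetical):

from google.appengine.ext import gql

q1 = gql.GQL("SELECT * FROM Person WHERE birthday = DATE('1980-05-17')")
q2 = gql.GQL("SELECT * FROM Person WHERE birthday = DATE(1980, 5, 17)")
q3 = gql.GQL("SELECT * FROM Person WHERE created > DATETIME('2009-02-12 12:30:36')")
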
 
@@ -1062,220 +1138,3 @@
 
   def __repr__(self):
     return 'Literal(%s)' % repr(self.__value)
-
-
-class MultiQuery(datastore.Query):
-  """Class representing a GQL query requiring multiple datastore queries.
-
-  This class is actually a subclass of datastore.Query as it is intended to act
-  like a normal Query object (supporting the same interface).
-  """
-
-  def __init__(self, bound_queries, orderings):
-    self.__bound_queries = bound_queries
-    self.__orderings = orderings
-
-  def __str__(self):
-    res = 'MultiQuery: '
-    for query in self.__bound_queries:
-      res = '%s %s' % (res, str(query))
-    return res
-
-  def Get(self, limit, offset=0):
-    """Get results of the query with a limit on the number of results.
-
-    Args:
-      limit: maximum number of values to return.
-      offset: offset requested -- if nonzero, this will override the offset in
-              the original query
-
-    Returns:
-      A list of entities with at most "limit" entries (less if the query
-      completes before reading limit values).
-    """
-    count = 1
-    result = []
-
-    iterator = self.Run()
-
-    try:
-      for i in xrange(offset):
-        val = iterator.next()
-    except StopIteration:
-      pass
-
-    try:
-      while count <= limit:
-        val = iterator.next()
-        result.append(val)
-        count += 1
-    except StopIteration:
-      pass
-    return result
-
-  class SortOrderEntity(object):
-    def __init__(self, entity_iterator, orderings):
-      self.__entity_iterator = entity_iterator
-      self.__entity = None
-      self.__min_max_value_cache = {}
-      try:
-        self.__entity = entity_iterator.next()
-      except StopIteration:
-        pass
-      else:
-        self.__orderings = orderings
-
-    def __str__(self):
-      return str(self.__entity)
-
-    def GetEntity(self):
-      return self.__entity
-
-    def GetNext(self):
-      return MultiQuery.SortOrderEntity(self.__entity_iterator,
-                                        self.__orderings)
-
-    def CmpProperties(self, that):
-      """Compare two entities and return their relative order.
-
-      Compares self to that based on the current sort orderings and the
-      key orders between them. Returns negative, 0, or positive depending on
-      whether self is less, equal to, or greater than that. This
-      comparison returns as if all values were to be placed in ascending order
-      (highest value last).  Only uses the sort orderings to compare (ignores
-       keys).
-
-      Args:
-        self: SortOrderEntity
-        that: SortOrderEntity
-
-      Returns:
-        Negative if self < that
-        Zero if self == that
-        Positive if self > that
-      """
-      if not self.__entity:
-        return cmp(self.__entity, that.__entity)
-
-      for (identifier, order) in self.__orderings:
-        value1 = self.__GetValueForId(self, identifier, order)
-        value2 = self.__GetValueForId(that, identifier, order)
-
-        result = cmp(value1, value2)
-        if order == datastore.Query.DESCENDING:
-          result = -result
-        if result:
-          return result
-      return 0
-
-    def __GetValueForId(self, sort_order_entity, identifier, sort_order):
-      value = sort_order_entity.__entity[identifier]
-      entity_key = sort_order_entity.__entity.key()
-      if self.__min_max_value_cache.has_key((entity_key, identifier)):
-        value = self.__min_max_value_cache[(entity_key, identifier)]
-      elif isinstance(value, list):
-        if sort_order == datastore.Query.DESCENDING:
-          value = min(value)
-        else:
-          value = max(value)
-        self.__min_max_value_cache[(entity_key, identifier)] = value
-
-      return value
-
-    def __cmp__(self, that):
-      """Compare self to that w.r.t. values defined in the sort order.
-
-      Compare an entity with another, using sort-order first, then the key
-      order to break ties. This can be used in a heap to have faster min-value
-      lookup.
-
-      Args:
-        that: other entity to compare to
-      Returns:
-        negative: if self is less than that in sort order
-        zero: if self is equal to that in sort order
-        positive: if self is greater than that in sort order
-      """
-      property_compare = self.CmpProperties(that)
-      if property_compare:
-        return property_compare
-      else:
-        return cmp(self.__entity.key(), that.__entity.key())
-
-  def Run(self):
-    """Return an iterable output with all results in order."""
-    results = []
-    count = 1
-    for bound_query in self.__bound_queries:
-      logging.log(LOG_LEVEL, 'Running query #%i' % count)
-      results.append(bound_query.Run())
-      count += 1
-
-    def IterateResults(results):
-      """Iterator function to return all results in sorted order.
-
-      Iterate over the array of results, yielding the next element, in
-      sorted order. This function is destructive (results will be empty
-      when the operation is complete).
-
-      Args:
-        results: list of result iterators to merge and iterate through
-
-      Yields:
-        The next result in sorted order.
-      """
-      result_heap = []
-      for result in results:
-        heap_value = MultiQuery.SortOrderEntity(result, self.__orderings)
-        if heap_value.GetEntity():
-          heapq.heappush(result_heap, heap_value)
-
-      used_keys = set()
-
-      while result_heap:
-        top_result = heapq.heappop(result_heap)
-
-        results_to_push = []
-        if top_result.GetEntity().key() not in used_keys:
-          yield top_result.GetEntity()
-        else:
-          pass
-
-        used_keys.add(top_result.GetEntity().key())
-
-        results_to_push = []
-        while result_heap:
-          next = heapq.heappop(result_heap)
-          if cmp(top_result, next):
-            results_to_push.append(next)
-            break
-          else:
-            results_to_push.append(next.GetNext())
-        results_to_push.append(top_result.GetNext())
-
-        for popped_result in results_to_push:
-          if popped_result.GetEntity():
-            heapq.heappush(result_heap, popped_result)
-
-    return IterateResults(results)
-
-  def Count(self, limit=None):
-    """Return the number of matched entities for this query.
-
-    Will return the de-duplicated count of results.  Will call the more
-    efficient Get() function if a limit is given.
-
-    Args:
-      limit: maximum number of entries to count (for any result > limit, return
-      limit).
-    Returns:
-      count of the number of entries returned.
-    """
-    if limit is None:
-      count = 0
-      for value in self.Run():
-        count += 1
-      return count
-    else:
-      return len(self.Get(limit))
-
--- a/thirdparty/google_appengine/google/appengine/ext/remote_api/__init__.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/remote_api/__init__.py	Thu Feb 12 12:30:36 2009 +0000
@@ -14,385 +14,3 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-"""An apiproxy stub that calls a remote handler via HTTP.
-
-This allows easy remote access to the App Engine datastore, and potentially any
-of the other App Engine APIs, using the same interface you use when accessing
-the service locally.
-
-An example Python script:
----
-from google.appengine.ext import db
-from google.appengine.ext import remote_api
-from myapp import models
-import getpass
-
-def auth_func():
-  return (raw_input('Username:'), getpass.getpass('Password:'))
-
-remote_api.ConfigureRemoteDatastore('my-app', '/remote_api', auth_func)
-
-# Now you can access the remote datastore just as if your code was running on
-# App Engine!
-
-houses = models.House.all().fetch(100)
-for a_house in q:
-  a_house.doors += 1
-db.put(houses)
----
-
-A few caveats:
-- Where possible, avoid iterating over queries directly. Fetching as many
-  results as you will need is faster and more efficient.
-- If you need to iterate, consider instead fetching items in batches with a sort
-  order and constructing a new query starting from where the previous one left
-  off. The __key__ pseudo-property can be used as a sort key for this purpose,
-  and does not even require a custom index if you are iterating over all
-  entities of a given type.
-- Likewise, it's a good idea to put entities in batches. Instead of calling put
-  for each individual entity, accumulate them and put them in batches using
-  db.put(), if you can.
-- Requests and responses are still limited to 1MB each, so if you have large
-  entities or try and fetch or put many of them at once, your requests may fail.
-"""
-
-
-
-
-
-import os
-import pickle
-import sha
-import sys
-import thread
-import threading
-from google.appengine.api import apiproxy_stub_map
-from google.appengine.datastore import datastore_pb
-from google.appengine.ext.remote_api import remote_api_pb
-from google.appengine.runtime import apiproxy_errors
-from google.appengine.tools import appengine_rpc
-
-
-def GetUserAgent():
-  """Determines the value of the 'User-agent' header to use for HTTP requests.
-
-  Returns:
-    String containing the 'user-agent' header value, which includes the SDK
-    version, the platform information, and the version of Python;
-    e.g., "remote_api/1.0.1 Darwin/9.2.0 Python/2.5.2".
-  """
-  product_tokens = []
-
-  product_tokens.append("Google-remote_api/1.0")
-
-  product_tokens.append(appengine_rpc.GetPlatformToken())
-
-  python_version = ".".join(str(i) for i in sys.version_info)
-  product_tokens.append("Python/%s" % python_version)
-
-  return " ".join(product_tokens)
-
-
-def GetSourceName():
-  return "Google-remote_api-1.0"
-
-
-class TransactionData(object):
-  """Encapsulates data about an individual transaction."""
-
-  def __init__(self, thread_id):
-    self.thread_id = thread_id
-    self.preconditions = {}
-    self.entities = {}
-
-
-class RemoteStub(object):
-  """A stub for calling services on a remote server over HTTP.
-
-  You can use this to stub out any service that the remote server supports.
-  """
-
-  def __init__(self, server, path):
-    """Constructs a new RemoteStub that communicates with the specified server.
-
-    Args:
-      server: An instance of a subclass of
-        google.appengine.tools.appengine_rpc.AbstractRpcServer.
-      path: The path to the handler this stub should send requests to.
-    """
-    self._server = server
-    self._path = path
-
-  def MakeSyncCall(self, service, call, request, response):
-    request_pb = remote_api_pb.Request()
-    request_pb.set_service_name(service)
-    request_pb.set_method(call)
-    request_pb.mutable_request().set_contents(request.Encode())
-
-    response_pb = remote_api_pb.Response()
-    response_pb.ParseFromString(self._server.Send(self._path,
-                                                  request_pb.Encode()))
-
-    if response_pb.has_exception():
-      raise pickle.loads(response_pb.exception().contents())
-    else:
-      response.ParseFromString(response_pb.response().contents())
-
-
-class RemoteDatastoreStub(RemoteStub):
-  """A specialised stub for accessing the App Engine datastore remotely.
-
-  A specialised stub is required because there are some datastore operations
-  that preserve state between calls. This stub makes queries possible.
-  Transactions on the remote datastore are unfortunately still impossible.
-  """
-
-  def __init__(self, server, path):
-    super(RemoteDatastoreStub, self).__init__(server, path)
-    self.__queries = {}
-    self.__transactions = {}
-
-    self.__next_local_cursor = 1
-    self.__local_cursor_lock = threading.Lock()
-    self.__next_local_tx = 1
-    self.__local_tx_lock = threading.Lock()
-
-  def MakeSyncCall(self, service, call, request, response):
-    assert service == 'datastore_v3'
-
-    explanation = []
-    assert request.IsInitialized(explanation), explanation
-
-    handler = getattr(self, '_Dynamic_' + call, None)
-    if handler:
-      handler(request, response)
-    else:
-      super(RemoteDatastoreStub, self).MakeSyncCall(service, call, request,
-                                                    response)
-
-    assert response.IsInitialized(explanation), explanation
-
-  def _Dynamic_RunQuery(self, query, query_result):
-    self.__local_cursor_lock.acquire()
-    try:
-      cursor_id = self.__next_local_cursor
-      self.__next_local_cursor += 1
-    finally:
-      self.__local_cursor_lock.release()
-    self.__queries[cursor_id] = query
-
-    query_result.mutable_cursor().set_cursor(cursor_id)
-    query_result.set_more_results(True)
-
-  def _Dynamic_Next(self, next_request, query_result):
-    cursor = next_request.cursor().cursor()
-    if cursor not in self.__queries:
-      raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
-                                             'Cursor %d not found' % cursor)
-    query = self.__queries[cursor]
-
-    if query is None:
-      query_result.set_more_results(False)
-      return
-
-    request = datastore_pb.Query()
-    request.CopyFrom(query)
-    if request.has_limit():
-      request.set_limit(min(request.limit(), next_request.count()))
-    else:
-      request.set_limit(next_request.count())
-
-    super(RemoteDatastoreStub, self).MakeSyncCall(
-        'remote_datastore', 'RunQuery', request, query_result)
-
-    query.set_offset(query.offset() + query_result.result_size())
-    if query.has_limit():
-      query.set_limit(query.limit() - query_result.result_size())
-    if not query_result.more_results():
-      self.__queries[cursor] = None
-
-  def _Dynamic_Get(self, get_request, get_response):
-    txid = None
-    if get_request.has_transaction():
-      txid = get_request.transaction().handle()
-      txdata = self.__transactions[txid]
-      assert (txdata.thread_id == thread.get_ident(),
-              "Transactions are single-threaded.")
-
-      keys = [(k, k.Encode()) for k in get_request.key_list()]
-
-      new_request = datastore_pb.GetRequest()
-      for key, enckey in keys:
-        if enckey not in txdata.entities:
-          new_request.add_key().CopyFrom(key)
-    else:
-      new_request = get_request
-
-    if new_request.key_size() > 0:
-      super(RemoteDatastoreStub, self).MakeSyncCall(
-          'datastore_v3', 'Get', new_request, get_response)
-
-    if txid is not None:
-      newkeys = new_request.key_list()
-      entities = get_response.entity_list()
-      for key, entity in zip(newkeys, entities):
-        entity_hash = None
-        if entity.has_entity():
-          entity_hash = sha.new(entity.entity().Encode()).digest()
-        txdata.preconditions[key.Encode()] = (key, entity_hash)
-
-      new_response = datastore_pb.GetResponse()
-      it = iter(get_response.entity_list())
-      for key, enckey in keys:
-        if enckey in txdata.entities:
-          cached_entity = txdata.entities[enckey][1]
-          if cached_entity:
-            new_response.add_entity().mutable_entity().CopyFrom(cached_entity)
-          else:
-            new_response.add_entity()
-        else:
-          new_entity = it.next()
-          if new_entity.has_entity():
-            assert new_entity.entity().key() == key
-            new_response.add_entity().CopyFrom(new_entity)
-          else:
-            new_response.add_entity()
-      get_response.CopyFrom(new_response)
-
-  def _Dynamic_Put(self, put_request, put_response):
-    if put_request.has_transaction():
-      entities = put_request.entity_list()
-
-      requires_id = lambda x: x.id() == 0 and not x.has_name()
-      new_ents = [e for e in entities
-                  if requires_id(e.key().path().element_list()[-1])]
-      id_request = remote_api_pb.PutRequest()
-      if new_ents:
-        for ent in new_ents:
-          e = id_request.add_entity()
-          e.mutable_key().CopyFrom(ent.key())
-          e.mutable_entity_group()
-        id_response = datastore_pb.PutResponse()
-        super(RemoteDatastoreStub, self).MakeSyncCall(
-            'remote_datastore', 'GetIDs', id_request, id_response)
-        assert id_request.entity_size() == id_response.key_size()
-        for key, ent in zip(id_response.key_list(), new_ents):
-          ent.mutable_key().CopyFrom(key)
-          ent.mutable_entity_group().add_element().CopyFrom(
-              key.path().element(0))
-
-      txid = put_request.transaction().handle()
-      txdata = self.__transactions[txid]
-      assert (txdata.thread_id == thread.get_ident(),
-              "Transactions are single-threaded.")
-      for entity in entities:
-        txdata.entities[entity.key().Encode()] = (entity.key(), entity)
-        put_response.add_key().CopyFrom(entity.key())
-    else:
-      super(RemoteDatastoreStub, self).MakeSyncCall(
-          'datastore_v3', 'Put', put_request, put_response)
-
-  def _Dynamic_Delete(self, delete_request, response):
-    if delete_request.has_transaction():
-      txid = delete_request.transaction().handle()
-      txdata = self.__transactions[txid]
-      assert (txdata.thread_id == thread.get_ident(),
-              "Transactions are single-threaded.")
-      for key in delete_request.key_list():
-        txdata.entities[key.Encode()] = (key, None)
-    else:
-      super(RemoteDatastoreStub, self).MakeSyncCall(
-          'datastore_v3', 'Delete', delete_request, response)
-
-  def _Dynamic_BeginTransaction(self, request, transaction):
-    self.__local_tx_lock.acquire()
-    try:
-      txid = self.__next_local_tx
-      self.__transactions[txid] = TransactionData(thread.get_ident())
-      self.__next_local_tx += 1
-    finally:
-      self.__local_tx_lock.release()
-    transaction.set_handle(txid)
-
-  def _Dynamic_Commit(self, transaction, transaction_response):
-    txid = transaction.handle()
-    if txid not in self.__transactions:
-      raise apiproxy_errors.ApplicationError(
-          datastore_pb.Error.BAD_REQUEST,
-          'Transaction %d not found.' % (txid,))
-
-    txdata = self.__transactions[txid]
-    assert (txdata.thread_id == thread.get_ident(),
-            "Transactions are single-threaded.")
-    del self.__transactions[txid]
-
-    tx = remote_api_pb.TransactionRequest()
-    for key, hash in txdata.preconditions.values():
-      precond = tx.add_precondition()
-      precond.mutable_key().CopyFrom(key)
-      if hash:
-        precond.set_hash(hash)
-
-    puts = tx.mutable_puts()
-    deletes = tx.mutable_deletes()
-    for key, entity in txdata.entities.values():
-      if entity:
-        puts.add_entity().CopyFrom(entity)
-      else:
-        deletes.add_key().CopyFrom(key)
-
-    super(RemoteDatastoreStub, self).MakeSyncCall(
-        'remote_datastore', 'Transaction',
-        tx, datastore_pb.PutResponse())
-
-  def _Dynamic_Rollback(self, transaction, transaction_response):
-    txid = transaction.handle()
-    self.__local_tx_lock.acquire()
-    try:
-      if txid not in self.__transactions:
-        raise apiproxy_errors.ApplicationError(
-            datastore_pb.Error.BAD_REQUEST,
-            'Transaction %d not found.' % (txid,))
-
-      assert (txdata[txid].thread_id == thread.get_ident(),
-              "Transactions are single-threaded.")
-      del self.__transactions[txid]
-    finally:
-      self.__local_tx_lock.release()
-
-  def _Dynamic_CreateIndex(self, index, id_response):
-    raise apiproxy_errors.CapabilityDisabledError(
-        'The remote datastore does not support index manipulation.')
-
-  def _Dynamic_UpdateIndex(self, index, void):
-    raise apiproxy_errors.CapabilityDisabledError(
-        'The remote datastore does not support index manipulation.')
-
-  def _Dynamic_DeleteIndex(self, index, void):
-    raise apiproxy_errors.CapabilityDisabledError(
-        'The remote datastore does not support index manipulation.')
-
-
-def ConfigureRemoteDatastore(app_id, path, auth_func, servername=None):
-  """Does necessary setup to allow easy remote access to an AppEngine datastore.
-
-  Args:
-    app_id: The app_id of your app, as declared in app.yaml.
-    path: The path to the remote_api handler for your app
-      (for example, '/remote_api').
-    auth_func: A function that takes no arguments and returns a
-      (username, password) tuple. This will be called if your application
-      requires authentication to access the remote_api handler (it should!)
-      and you do not already have a valid auth cookie.
-    servername: The hostname your app is deployed on. Defaults to
-      <app_id>.appspot.com.
-  """
-  if not servername:
-    servername = '%s.appspot.com' % (app_id,)
-  os.environ['APPLICATION_ID'] = app_id
-  apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
-  server = appengine_rpc.HttpRpcServer(servername, auth_func, GetUserAgent(),
-                                       GetSourceName())
-  stub = RemoteDatastoreStub(server, path)
-  apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', stub)
--- a/thirdparty/google_appengine/google/appengine/ext/remote_api/handler.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/remote_api/handler.py	Thu Feb 12 12:30:36 2009 +0000
@@ -209,7 +209,7 @@
       response.mutable_response().set_contents(response_data.Encode())
       self.response.set_status(200)
     except Exception, e:
-      self.response.set_status(500)
+      self.response.set_status(200)
       response.mutable_exception().set_contents(pickle.dumps(e))
     self.response.out.write(response.Encode())
 
--- a/thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_pb.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_pb.py	Thu Feb 12 12:30:36 2009 +0000
@@ -44,8 +44,9 @@
     self.service_name_ = x
 
   def clear_service_name(self):
-    self.has_service_name_ = 0
-    self.service_name_ = ""
+    if self.has_service_name_:
+      self.has_service_name_ = 0
+      self.service_name_ = ""
 
   def has_service_name(self): return self.has_service_name_
 
@@ -56,8 +57,9 @@
     self.method_ = x
 
   def clear_method(self):
-    self.has_method_ = 0
-    self.method_ = ""
+    if self.has_method_:
+      self.has_method_ = 0
+      self.method_ = ""
 
   def has_method(self): return self.has_method_
 
@@ -201,8 +203,9 @@
   def mutable_response(self): self.has_response_ = 1; return self.response()
 
   def clear_response(self):
-    self.has_response_ = 0;
-    if self.response_ is not None: self.response_.Clear()
+    if self.has_response_:
+      self.has_response_ = 0;
+      if self.response_ is not None: self.response_.Clear()
 
   def has_response(self): return self.has_response_
 
@@ -218,8 +221,9 @@
   def mutable_exception(self): self.has_exception_ = 1; return self.exception()
 
   def clear_exception(self):
-    self.has_exception_ = 0;
-    if self.exception_ is not None: self.exception_.Clear()
+    if self.has_exception_:
+      self.has_exception_ = 0;
+      if self.exception_ is not None: self.exception_.Clear()
 
   def has_exception(self): return self.has_exception_
 
@@ -337,8 +341,9 @@
     self.hash_ = x
 
   def clear_hash(self):
-    self.has_hash_ = 0
-    self.hash_ = ""
+    if self.has_hash_:
+      self.has_hash_ = 0
+      self.hash_ = ""
 
   def has_hash(self): return self.has_hash_
 
@@ -448,8 +453,9 @@
   def mutable_puts(self): self.has_puts_ = 1; return self.puts()
 
   def clear_puts(self):
-    self.has_puts_ = 0;
-    if self.puts_ is not None: self.puts_.Clear()
+    if self.has_puts_:
+      self.has_puts_ = 0;
+      if self.puts_ is not None: self.puts_.Clear()
 
   def has_puts(self): return self.has_puts_
 
@@ -465,8 +471,9 @@
   def mutable_deletes(self): self.has_deletes_ = 1; return self.deletes()
 
   def clear_deletes(self):
-    self.has_deletes_ = 0;
-    if self.deletes_ is not None: self.deletes_.Clear()
+    if self.has_deletes_:
+      self.has_deletes_ = 0;
+      if self.deletes_ is not None: self.deletes_.Clear()
 
   def has_deletes(self): return self.has_deletes_
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_stub.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,403 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""An apiproxy stub that calls a remote handler via HTTP.
+
+This allows easy remote access to the App Engine datastore, and potentially any
+of the other App Engine APIs, using the same interface you use when accessing
+the service locally.
+
+An example Python script:
+---
+from google.appengine.ext import db
+from google.appengine.ext.remote_api import remote_api_stub
+from myapp import models
+import getpass
+
+def auth_func():
+  return (raw_input('Username:'), getpass.getpass('Password:'))
+
+remote_api_stub.ConfigureRemoteDatastore('my-app', '/remote_api', auth_func)
+
+# Now you can access the remote datastore just as if your code was running on
+# App Engine!
+
+houses = models.House.all().fetch(100)
+for a_house in houses:
+  a_house.doors += 1
+db.put(houses)
+---
+
+A few caveats:
+- Where possible, avoid iterating over queries directly. Fetching as many
+  results as you will need is faster and more efficient.
+- If you need to iterate, consider instead fetching items in batches with a sort
+  order and constructing a new query starting from where the previous one left
+  off. The __key__ pseudo-property can be used as a sort key for this purpose,
+  and does not even require a custom index if you are iterating over all
+  entities of a given type.
+- Likewise, it's a good idea to put entities in batches. Instead of calling put
+  for each individual entity, accumulate them and put them in batches using
+  db.put(), if you can.
+- Requests and responses are still limited to 1MB each, so if you have large
+  entities or try to fetch or put many of them at once, your requests may fail.
+"""
+
+
+
+
+
+import os
+import pickle
+import sha
+import sys
+import thread
+import threading
+from google.appengine.api import apiproxy_stub_map
+from google.appengine.datastore import datastore_pb
+from google.appengine.ext.remote_api import remote_api_pb
+from google.appengine.runtime import apiproxy_errors
+from google.appengine.tools import appengine_rpc
+
+
+def GetUserAgent():
+  """Determines the value of the 'User-agent' header to use for HTTP requests.
+
+  Returns:
+    String containing the 'user-agent' header value, which includes the SDK
+    version, the platform information, and the version of Python;
+    e.g., "remote_api/1.0.1 Darwin/9.2.0 Python/2.5.2".
+  """
+  product_tokens = []
+
+  product_tokens.append("Google-remote_api/1.0")
+
+  product_tokens.append(appengine_rpc.GetPlatformToken())
+
+  python_version = ".".join(str(i) for i in sys.version_info)
+  product_tokens.append("Python/%s" % python_version)
+
+  return " ".join(product_tokens)
+
+
+def GetSourceName():
+  return "Google-remote_api-1.0"
+
+
+class TransactionData(object):
+  """Encapsulates data about an individual transaction."""
+
+  def __init__(self, thread_id):
+    self.thread_id = thread_id
+    self.preconditions = {}
+    self.entities = {}
+
+
+class RemoteStub(object):
+  """A stub for calling services on a remote server over HTTP.
+
+  You can use this to stub out any service that the remote server supports.
+  """
+
+  def __init__(self, server, path):
+    """Constructs a new RemoteStub that communicates with the specified server.
+
+    Args:
+      server: An instance of a subclass of
+        google.appengine.tools.appengine_rpc.AbstractRpcServer.
+      path: The path to the handler this stub should send requests to.
+    """
+    self._server = server
+    self._path = path
+
+  def MakeSyncCall(self, service, call, request, response):
+    request_pb = remote_api_pb.Request()
+    request_pb.set_service_name(service)
+    request_pb.set_method(call)
+    request_pb.mutable_request().set_contents(request.Encode())
+
+    response_pb = remote_api_pb.Response()
+    response_pb.ParseFromString(self._server.Send(self._path,
+                                                  request_pb.Encode()))
+
+    if response_pb.has_exception():
+      raise pickle.loads(response_pb.exception().contents())
+    else:
+      response.ParseFromString(response_pb.response().contents())
+
+
+class RemoteDatastoreStub(RemoteStub):
+  """A specialised stub for accessing the App Engine datastore remotely.
+
+  A specialised stub is required because there are some datastore operations
+  that preserve state between calls. This stub makes queries possible.
+  Transactions on the remote datastore are unfortunately still impossible.
+  """
+
+  def __init__(self, server, path):
+    super(RemoteDatastoreStub, self).__init__(server, path)
+    self.__queries = {}
+    self.__transactions = {}
+
+    self.__next_local_cursor = 1
+    self.__local_cursor_lock = threading.Lock()
+    self.__next_local_tx = 1
+    self.__local_tx_lock = threading.Lock()
+
+  def MakeSyncCall(self, service, call, request, response):
+    assert service == 'datastore_v3'
+
+    explanation = []
+    assert request.IsInitialized(explanation), explanation
+
+    handler = getattr(self, '_Dynamic_' + call, None)
+    if handler:
+      handler(request, response)
+    else:
+      super(RemoteDatastoreStub, self).MakeSyncCall(service, call, request,
+                                                    response)
+
+    assert response.IsInitialized(explanation), explanation
+
+  def _Dynamic_RunQuery(self, query, query_result):
+    self.__local_cursor_lock.acquire()
+    try:
+      cursor_id = self.__next_local_cursor
+      self.__next_local_cursor += 1
+    finally:
+      self.__local_cursor_lock.release()
+    self.__queries[cursor_id] = query
+
+    query_result.mutable_cursor().set_cursor(cursor_id)
+    query_result.set_more_results(True)
+
+  def _Dynamic_Next(self, next_request, query_result):
+    cursor = next_request.cursor().cursor()
+    if cursor not in self.__queries:
+      raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
+                                             'Cursor %d not found' % cursor)
+    query = self.__queries[cursor]
+
+    if query is None:
+      query_result.set_more_results(False)
+      return
+
+    request = datastore_pb.Query()
+    request.CopyFrom(query)
+    if request.has_limit():
+      request.set_limit(min(request.limit(), next_request.count()))
+    else:
+      request.set_limit(next_request.count())
+
+    super(RemoteDatastoreStub, self).MakeSyncCall(
+        'remote_datastore', 'RunQuery', request, query_result)
+
+    query.set_offset(query.offset() + query_result.result_size())
+    if query.has_limit():
+      query.set_limit(query.limit() - query_result.result_size())
+    if not query_result.more_results():
+      self.__queries[cursor] = None
+
+  def _Dynamic_Get(self, get_request, get_response):
+    txid = None
+    if get_request.has_transaction():
+      txid = get_request.transaction().handle()
+      txdata = self.__transactions[txid]
+      assert txdata.thread_id == thread.get_ident(), \
+          "Transactions are single-threaded."
+
+      keys = [(k, k.Encode()) for k in get_request.key_list()]
+
+      new_request = datastore_pb.GetRequest()
+      for key, enckey in keys:
+        if enckey not in txdata.entities:
+          new_request.add_key().CopyFrom(key)
+    else:
+      new_request = get_request
+
+    if new_request.key_size() > 0:
+      super(RemoteDatastoreStub, self).MakeSyncCall(
+          'datastore_v3', 'Get', new_request, get_response)
+
+    if txid is not None:
+      newkeys = new_request.key_list()
+      entities = get_response.entity_list()
+      for key, entity in zip(newkeys, entities):
+        entity_hash = None
+        if entity.has_entity():
+          entity_hash = sha.new(entity.entity().Encode()).digest()
+        txdata.preconditions[key.Encode()] = (key, entity_hash)
+
+      new_response = datastore_pb.GetResponse()
+      it = iter(get_response.entity_list())
+      for key, enckey in keys:
+        if enckey in txdata.entities:
+          cached_entity = txdata.entities[enckey][1]
+          if cached_entity:
+            new_response.add_entity().mutable_entity().CopyFrom(cached_entity)
+          else:
+            new_response.add_entity()
+        else:
+          new_entity = it.next()
+          if new_entity.has_entity():
+            assert new_entity.entity().key() == key
+            new_response.add_entity().CopyFrom(new_entity)
+          else:
+            new_response.add_entity()
+      get_response.CopyFrom(new_response)
+
+  def _Dynamic_Put(self, put_request, put_response):
+    if put_request.has_transaction():
+      entities = put_request.entity_list()
+
+      requires_id = lambda x: x.id() == 0 and not x.has_name()
+      new_ents = [e for e in entities
+                  if requires_id(e.key().path().element_list()[-1])]
+      id_request = remote_api_pb.PutRequest()
+      if new_ents:
+        for ent in new_ents:
+          e = id_request.add_entity()
+          e.mutable_key().CopyFrom(ent.key())
+          e.mutable_entity_group()
+        id_response = datastore_pb.PutResponse()
+        super(RemoteDatastoreStub, self).MakeSyncCall(
+            'remote_datastore', 'GetIDs', id_request, id_response)
+        assert id_request.entity_size() == id_response.key_size()
+        for key, ent in zip(id_response.key_list(), new_ents):
+          ent.mutable_key().CopyFrom(key)
+          ent.mutable_entity_group().add_element().CopyFrom(
+              key.path().element(0))
+
+      txid = put_request.transaction().handle()
+      txdata = self.__transactions[txid]
+      assert txdata.thread_id == thread.get_ident(), \
+          "Transactions are single-threaded."
+      for entity in entities:
+        txdata.entities[entity.key().Encode()] = (entity.key(), entity)
+        put_response.add_key().CopyFrom(entity.key())
+    else:
+      super(RemoteDatastoreStub, self).MakeSyncCall(
+          'datastore_v3', 'Put', put_request, put_response)
+
+  def _Dynamic_Delete(self, delete_request, response):
+    if delete_request.has_transaction():
+      txid = delete_request.transaction().handle()
+      txdata = self.__transactions[txid]
+      assert txdata.thread_id == thread.get_ident(), \
+          "Transactions are single-threaded."
+      for key in delete_request.key_list():
+        txdata.entities[key.Encode()] = (key, None)
+    else:
+      super(RemoteDatastoreStub, self).MakeSyncCall(
+          'datastore_v3', 'Delete', delete_request, response)
+
+  def _Dynamic_BeginTransaction(self, request, transaction):
+    self.__local_tx_lock.acquire()
+    try:
+      txid = self.__next_local_tx
+      self.__transactions[txid] = TransactionData(thread.get_ident())
+      self.__next_local_tx += 1
+    finally:
+      self.__local_tx_lock.release()
+    transaction.set_handle(txid)
+
+  def _Dynamic_Commit(self, transaction, transaction_response):
+    txid = transaction.handle()
+    if txid not in self.__transactions:
+      raise apiproxy_errors.ApplicationError(
+          datastore_pb.Error.BAD_REQUEST,
+          'Transaction %d not found.' % (txid,))
+
+    txdata = self.__transactions[txid]
+    assert txdata.thread_id == thread.get_ident(), \
+        "Transactions are single-threaded."
+    del self.__transactions[txid]
+
+    tx = remote_api_pb.TransactionRequest()
+    for key, hash in txdata.preconditions.values():
+      precond = tx.add_precondition()
+      precond.mutable_key().CopyFrom(key)
+      if hash:
+        precond.set_hash(hash)
+
+    puts = tx.mutable_puts()
+    deletes = tx.mutable_deletes()
+    for key, entity in txdata.entities.values():
+      if entity:
+        puts.add_entity().CopyFrom(entity)
+      else:
+        deletes.add_key().CopyFrom(key)
+
+    super(RemoteDatastoreStub, self).MakeSyncCall(
+        'remote_datastore', 'Transaction',
+        tx, datastore_pb.PutResponse())
+
+  def _Dynamic_Rollback(self, transaction, transaction_response):
+    txid = transaction.handle()
+    self.__local_tx_lock.acquire()
+    try:
+      if txid not in self.__transactions:
+        raise apiproxy_errors.ApplicationError(
+            datastore_pb.Error.BAD_REQUEST,
+            'Transaction %d not found.' % (txid,))
+
+      assert self.__transactions[txid].thread_id == thread.get_ident(), \
+          "Transactions are single-threaded."
+      del self.__transactions[txid]
+    finally:
+      self.__local_tx_lock.release()
+
+  def _Dynamic_CreateIndex(self, index, id_response):
+    raise apiproxy_errors.CapabilityDisabledError(
+        'The remote datastore does not support index manipulation.')
+
+  def _Dynamic_UpdateIndex(self, index, void):
+    raise apiproxy_errors.CapabilityDisabledError(
+        'The remote datastore does not support index manipulation.')
+
+  def _Dynamic_DeleteIndex(self, index, void):
+    raise apiproxy_errors.CapabilityDisabledError(
+        'The remote datastore does not support index manipulation.')
+
+
+def ConfigureRemoteDatastore(app_id,
+                             path,
+                             auth_func,
+                             servername=None,
+                             rpc_server_factory=appengine_rpc.HttpRpcServer):
+  """Does necessary setup to allow easy remote access to an AppEngine datastore.
+
+  Args:
+    app_id: The app_id of your app, as declared in app.yaml.
+    path: The path to the remote_api handler for your app
+      (for example, '/remote_api').
+    auth_func: A function that takes no arguments and returns a
+      (username, password) tuple. This will be called if your application
+      requires authentication to access the remote_api handler (it should!)
+      and you do not already have a valid auth cookie.
+    servername: The hostname your app is deployed on. Defaults to
+      <app_id>.appspot.com.
+    rpc_server_factory: A factory to construct the rpc server for the datastore.
+  """
+  if not servername:
+    servername = '%s.appspot.com' % (app_id,)
+  os.environ['APPLICATION_ID'] = app_id
+  apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
+  server = rpc_server_factory(servername, auth_func, GetUserAgent(),
+                              GetSourceName())
+  stub = RemoteDatastoreStub(server, path)
+  apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', stub)
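+
+
+# A minimal usage sketch (not part of the SDK source; the app id, path and
+# credential prompt below are illustrative only, and db here means
+# google.appengine.ext.db). Once the stub is registered, ordinary datastore
+# calls are served by the remote app:
+#
+#   def auth_func():
+#     return (raw_input('Email: '), getpass.getpass('Password: '))
+#
+#   ConfigureRemoteDatastore('my-app-id', '/remote_api', auth_func)
+#   for greeting in db.GqlQuery('SELECT * FROM Greeting LIMIT 10'):
+#     print greeting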
--- a/thirdparty/google_appengine/google/appengine/ext/search/__init__.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/search/__init__.py	Thu Feb 12 12:30:36 2009 +0000
@@ -260,11 +260,30 @@
         filter.set_op(datastore_pb.Query_Filter.EQUAL)
         prop = filter.add_property()
         prop.set_name(SearchableEntity._FULL_TEXT_INDEX_PROPERTY)
+        prop.set_multiple(len(keywords) > 1)
         prop.mutable_value().set_stringvalue(unicode(keyword).encode('utf-8'))
 
     return pb
 
 
+class SearchableMultiQuery(datastore.MultiQuery):
+  """A multiquery that supports Search() by searching subqueries."""
+
+  def Search(self, *args, **kwargs):
+    """Add a search query, by trying to add it to all subqueries.
+
+    Args:
+      args: Passed to Search on each subquery.
+      kwargs: Passed to Search on each subquery.
+
+    Returns:
+      self for consistency with SearchableQuery.
+    """
+    for q in self:
+      q.Search(*args, **kwargs)
+    return self
+
+
 class SearchableModel(db.Model):
   """A subclass of db.Model that supports full text search and indexing.
 
@@ -290,7 +309,9 @@
 
     def _get_query(self):
       """Wraps db.Query._get_query() and injects SearchableQuery."""
-      query = db.Query._get_query(self, _query_class=SearchableQuery)
+      query = db.Query._get_query(self,
+                                  _query_class=SearchableQuery,
+                                  _multi_query_class=SearchableMultiQuery)
       if self._search_query:
         query.Search(self._search_query)
       return query
--- a/thirdparty/google_appengine/google/appengine/ext/webapp/util.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/webapp/util.py	Thu Feb 12 12:30:36 2009 +0000
@@ -38,7 +38,7 @@
 
     @login_required
     def get(self):
-      user = users.GetCurrentUser(self)
+      user = users.get_current_user()
       self.response.out.write('Hello, ' + user.nickname())
 
   We will redirect to a login page if the user is not logged in. We always
@@ -49,9 +49,9 @@
     if self.request.method != 'GET':
       raise webapp.Error('The check_login decorator can only be used for GET '
                          'requests')
-    user = users.GetCurrentUser()
+    user = users.get_current_user()
     if not user:
-      self.redirect(users.CreateLoginURL(self.request.uri))
+      self.redirect(users.create_login_url(self.request.uri))
       return
     else:
       handler_method(self, *args)
--- a/thirdparty/google_appengine/google/appengine/runtime/apiproxy.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/runtime/apiproxy.py	Thu Feb 12 12:30:36 2009 +0000
@@ -127,7 +127,7 @@
 
     _apphosting_runtime___python__apiproxy.MakeCall(
         self.package, self.call, e.buffer(), self.__result_dict,
-        self.__MakeCallDone, self)
+        self.__MakeCallDone, self, deadline=(self.deadline or -1))
 
   def __MakeCallDone(self):
     self.__state = RPC.FINISHING
--- a/thirdparty/google_appengine/google/appengine/runtime/apiproxy_errors.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/runtime/apiproxy_errors.py	Thu Feb 12 12:30:36 2009 +0000
@@ -49,6 +49,7 @@
   def __init__(self, application_error, error_detail=''):
     self.application_error = application_error
     self.error_detail = error_detail
+    Error.__init__(self, application_error)
 
   def __str__(self):
     return 'ApplicationError: %d %s' % (self.application_error,
--- a/thirdparty/google_appengine/google/appengine/tools/appcfg.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/appcfg.py	Thu Feb 12 12:30:36 2009 +0000
@@ -71,6 +71,10 @@
 
 
 appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = "python"
+_api_versions = os.environ.get('GOOGLE_TEST_API_VERSIONS', '1')
+_options = validation.Options(*_api_versions.split(','))
+appinfo.AppInfoExternal.ATTRIBUTES[appinfo.API_VERSION] = _options
+del _api_versions, _options
 
 
 def StatusUpdate(msg):
@@ -189,6 +193,29 @@
 
   return version
 
+def RetryWithBackoff(initial_delay, backoff_factor, max_tries, callable):
+  """Calls a function multiple times, backing off more and more each time.
+
+  Args:
+    initial_delay: Initial delay after the first try, in seconds.
+    backoff_factor: Delay will be multiplied by this factor after each try.
+    max_tries: Maximum number of tries.
+    callable: The function to call; it is called with no arguments.
+
+  Returns:
+    True if the function succeeded in one of its tries.
+
+  Raises:
+    Whatever the function raises; an exception immediately stops retries.
+  """
+  delay = initial_delay
+  while not callable() and max_tries > 0:
+    StatusUpdate("Will check again in %s seconds." % delay)
+    time.sleep(delay)
+    delay *= backoff_factor
+    max_tries -= 1
+  return max_tries > 0
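+
+# For example, RetryWithBackoff(1, 2, 8, self.IsReady) (used by Commit below)
+# re-checks readiness with pauses of 1, 2, 4, ... seconds between tries,
+# giving up once the eight-try budget is exhausted.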
+
 
 class UpdateCheck(object):
   """Determines if the local SDK is the latest version.
@@ -789,7 +816,7 @@
   header uses UTC), and the client's local time is irrelevant.
 
   Args:
-    A posix timestamp giving current UTC time.
+    now: A posix timestamp giving current UTC time.
 
   Returns:
     A pseudo-posix timestamp giving current Pacific time.  Passing
@@ -913,6 +940,7 @@
       hash of the file contents.
     in_transaction: True iff a transaction with the server has started.
       An AppVersionUpload can do only one transaction at a time.
+    deployed: True iff the Deploy method has been called.
   """
 
   def __init__(self, server, config):
@@ -930,6 +958,7 @@
     self.version = self.config.version
     self.files = {}
     self.in_transaction = False
+    self.deployed = False
 
   def _Hash(self, content):
     """Compute the hash of the content.
@@ -1059,6 +1088,8 @@
     All the files returned by Begin() must have been uploaded with UploadFile()
     before Commit() can be called.
 
+    This tries the new 'deploy' method; if that fails it uses the old 'commit'.
+
     Raises:
       Exception: Some required files were not uploaded.
     """
@@ -1066,10 +1097,65 @@
     if self.files:
       raise Exception("Not all required files have been uploaded.")
 
-    StatusUpdate("Closing update.")
-    self.server.Send("/api/appversion/commit", app_id=self.app_id,
+    try:
+      self.Deploy()
+      if not RetryWithBackoff(1, 2, 8, self.IsReady):
+        logging.warning("Version still not ready to serve, aborting.")
+        raise Exception("Version not ready.")
+      self.StartServing()
+    except urllib2.HTTPError, e:
+      if e.code != 404:
+        raise
+      StatusUpdate("Closing update.")
+      self.server.Send("/api/appversion/commit", app_id=self.app_id,
+                       version=self.version)
+      self.in_transaction = False
+
+  def Deploy(self):
+    """Deploys the new app version but does not make it default.
+
+    All the files returned by Begin() must have been uploaded with UploadFile()
+    before Deploy() can be called.
+
+    Raises:
+      Exception: Some required files were not uploaded.
+    """
+    assert self.in_transaction, "Begin() must be called before Deploy()."
+    if self.files:
+      raise Exception("Not all required files have been uploaded.")
+
+    StatusUpdate("Deploying new version.")
+    self.server.Send("/api/appversion/deploy", app_id=self.app_id,
                      version=self.version)
-    self.in_transaction = False
+    self.deployed = True
+
+  def IsReady(self):
+    """Check if the new app version is ready to serve traffic.
+
+    Raises:
+      Exception: Deploy has not yet been called.
+
+    Returns:
+      True if the server returned the app is ready to serve.
+    """
+    assert self.deployed, "Deploy() must be called before IsReady()."
+
+    StatusUpdate("Checking if new version is ready to serve.")
+    result = self.server.Send("/api/appversion/isready", app_id=self.app_id,
+                              version=self.version)
+    return result == "1"
+
+  def StartServing(self):
+    """Start serving with the newly created version.
+
+    Raises:
+      Exception: Deploy has not yet been called.
+    """
+    assert self.deployed, "Deploy() must be called before StartServing()."
+
+    StatusUpdate("Closing update: new version is ready to start serving.")
+    self.server.Send("/api/appversion/startserving",
+                     app_id=self.app_id, version=self.version)
 
   def Rollback(self):
     """Rolls back the transaction if one is in progress."""
@@ -1140,12 +1226,13 @@
             StatusUpdate("Uploaded %d files." % num_files)
 
       self.Commit()
+
     except KeyboardInterrupt:
       logging.info("User interrupted. Aborting.")
       self.Rollback()
       raise
     except:
-      logging.error("An unexpected error occurred. Aborting.")
+      logging.exception("An unexpected error occurred. Aborting.")
       self.Rollback()
       raise
 
@@ -1271,7 +1358,8 @@
                rpc_server_class=appengine_rpc.HttpRpcServer,
                raw_input_fn=raw_input,
                password_input_fn=getpass.getpass,
-               error_fh=sys.stderr):
+               error_fh=sys.stderr,
+               update_check_class=UpdateCheck):
     """Initializer.  Parses the cmdline and selects the Action to use.
 
     Initializes all of the attributes described in the class docstring.
@@ -1284,6 +1372,7 @@
       raw_input_fn: Function used for getting user email.
       password_input_fn: Function used for getting user password.
       error_fh: Unexpected HTTPErrors are printed to this file handle.
+      update_check_class: UpdateCheck class (can be replaced for testing).
     """
     self.parser_class = parser_class
     self.argv = argv
@@ -1291,6 +1380,7 @@
     self.raw_input_fn = raw_input_fn
     self.password_input_fn = password_input_fn
     self.error_fh = error_fh
+    self.update_check_class = update_check_class
 
     self.parser = self._GetOptionParser()
     for action in self.actions.itervalues():
@@ -1571,7 +1661,7 @@
     appyaml = self._ParseAppYaml(basepath)
     rpc_server = self._GetRpcServer()
 
-    updatecheck = UpdateCheck(rpc_server, appyaml)
+    updatecheck = self.update_check_class(rpc_server, appyaml)
     updatecheck.CheckForUpdates()
 
     appversion = AppVersionUpload(rpc_server, appyaml)
@@ -1603,7 +1693,7 @@
       parser: An instance of OptionsParser.
     """
     parser.add_option("-S", "--max_size", type="int", dest="max_size",
-                      default=1048576, metavar="SIZE",
+                      default=10485760, metavar="SIZE",
                       help="Maximum size of a file to upload.")
 
   def VacuumIndexes(self):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/bulkloader.py	Thu Feb 12 12:30:36 2009 +0000
@@ -0,0 +1,2588 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Imports CSV data over HTTP.
+
+Usage:
+  %(arg0)s [flags]
+
+    --debug                 Show debugging information. (Optional)
+    --app_id=<string>       Application ID of endpoint (Optional for
+                            *.appspot.com)
+    --auth_domain=<domain>  The auth domain to use for logging in and for
+                            UserProperties. (Default: gmail.com)
+    --bandwidth_limit=<int> The maximum number of bytes per second for the
+                            aggregate transfer of data to the server. Bursts
+                            may exceed this, but overall transfer rate is
+                            restricted to this rate. (Default 250000)
+    --batch_size=<int>      Number of Entity objects to include in each post to
+                            the URL endpoint. The more data per row/Entity, the
+                            smaller the batch size should be. (Default 10)
+    --config_file=<path>    File containing Model and Loader definitions.
+                            (Required)
+    --db_filename=<path>    Specific progress database to write to, or to
+                            resume from. If not supplied, then a new database
+                            will be started, named:
+                            bulkloader-progress-TIMESTAMP.
+                            The special filename "skip" may be used to simply
+                            skip reading/writing any progress information.
+    --filename=<path>       Path to the CSV file to import. (Required)
+    --http_limit=<int>      The maximum number of HTTP requests per second to
+                            send to the server. (Default: 8)
+    --kind=<string>         Name of the Entity object kind to put in the
+                            datastore. (Required)
+    --num_threads=<int>     Number of threads to use for uploading entities
+                            (Default 10)
+    --rps_limit=<int>       The maximum number of records per second to
+                            transfer to the server. (Default: 20)
+    --url=<string>          URL endpoint to post to for importing data.
+                            (Required)
+
+The exit status will be 0 on success, non-zero on import failure.
+
+Works with the remote_api mix-in library for google.appengine.ext.remote_api.
+Please look there for documentation about how to set up the server side.
+
+Example:
+
+%(arg0)s --url=http://app.appspot.com/remote_api --kind=Model \
+ --filename=data.csv --config_file=loader_config.py
+
+"""
+
+
+
+import csv
+import getopt
+import getpass
+import logging
+import new
+import os
+import Queue
+import signal
+import sys
+import threading
+import time
+import traceback
+import urllib2
+import urlparse
+
+from google.appengine.ext import db
+from google.appengine.ext.remote_api import remote_api_stub
+from google.appengine.tools import appengine_rpc
+
+try:
+  import sqlite3
+except ImportError:
+  pass
+
+UPLOADER_VERSION = '1'
+
+DEFAULT_THREAD_COUNT = 10
+
+DEFAULT_BATCH_SIZE = 10
+
+DEFAULT_QUEUE_SIZE = DEFAULT_THREAD_COUNT * 10
+
+_THREAD_SHOULD_EXIT = '_THREAD_SHOULD_EXIT'
+
+STATE_READ = 0
+STATE_SENDING = 1
+STATE_SENT = 2
+STATE_NOT_SENT = 3
+
+MINIMUM_THROTTLE_SLEEP_DURATION = 0.001
+
+DATA_CONSUMED_TO_HERE = 'DATA_CONSUMED_TO_HERE'
+
+INITIAL_BACKOFF = 1.0
+
+BACKOFF_FACTOR = 2.0
+
+
+DEFAULT_BANDWIDTH_LIMIT = 250000
+
+DEFAULT_RPS_LIMIT = 20
+
+DEFAULT_REQUEST_LIMIT = 8
+
+BANDWIDTH_UP = 'http-bandwidth-up'
+BANDWIDTH_DOWN = 'http-bandwidth-down'
+REQUESTS = 'http-requests'
+HTTPS_BANDWIDTH_UP = 'https-bandwidth-up'
+HTTPS_BANDWIDTH_DOWN = 'https-bandwidth-down'
+HTTPS_REQUESTS = 'https-requests'
+RECORDS = 'records'
+
+
+def StateMessage(state):
+  """Converts a numeric state identifier to a status message."""
+  return ({
+      STATE_READ: 'Batch read from file.',
+      STATE_SENDING: 'Sending batch to server.',
+      STATE_SENT: 'Batch successfully sent.',
+      STATE_NOT_SENT: 'Error while sending batch.'
+  }[state])
+
+
+class Error(Exception):
+  """Base-class for exceptions in this module."""
+
+
+class FatalServerError(Error):
+  """An unrecoverable error occurred while trying to post data to the server."""
+
+
+class ResumeError(Error):
+  """Error while trying to resume a partial upload."""
+
+
+class ConfigurationError(Error):
+  """Error in configuration options."""
+
+
+class AuthenticationError(Error):
+  """Error while trying to authenticate with the server."""
+
+
+def GetCSVGeneratorFactory(csv_filename, batch_size,
+                           openfile=open, create_csv_reader=csv.reader):
+  """Return a factory that creates a CSV-based WorkItem generator.
+
+  Args:
+    csv_filename: File on disk containing CSV data.
+    batch_size: Maximum number of CSV rows to stash into a WorkItem.
+    openfile: Used for dependency injection.
+    create_csv_reader: Used for dependency injection.
+
+  Returns: A callable (accepting the progress queue and progress
+    generator as input) which creates the WorkItem generator.
+  """
+
+  def CreateGenerator(progress_queue, progress_generator):
+    """Initialize a CSV generator linked to a progress generator and queue.
+
+    Args:
+      progress_queue: A ProgressQueue instance to send progress information.
+      progress_generator: A generator of progress information or None.
+
+    Returns:
+      A CSVGenerator instance.
+    """
+    return CSVGenerator(progress_queue,
+                        progress_generator,
+                        csv_filename,
+                        batch_size,
+                        openfile,
+                        create_csv_reader)
+  return CreateGenerator
+
+
+class CSVGenerator(object):
+  """Reads a CSV file and generates WorkItems containing batches of records."""
+
+  def __init__(self,
+               progress_queue,
+               progress_generator,
+               csv_filename,
+               batch_size,
+               openfile,
+               create_csv_reader):
+    """Initializes a CSV generator.
+
+    Args:
+      progress_queue: A queue used for tracking progress information.
+      progress_generator: A generator of prior progress information, or None
+        if there is no prior status.
+      csv_filename: File on disk containing CSV data.
+      batch_size: Maximum number of CSV rows to stash into a WorkItem.
+      openfile: Used for dependency injection of 'open'.
+      create_csv_reader: Used for dependency injection of 'csv.reader'.
+    """
+    self.progress_queue = progress_queue
+    self.progress_generator = progress_generator
+    self.csv_filename = csv_filename
+    self.batch_size = batch_size
+    self.openfile = openfile
+    self.create_csv_reader = create_csv_reader
+    self.line_number = 1
+    self.column_count = None
+    self.read_rows = []
+    self.reader = None
+    self.row_count = 0
+    self.sent_count = 0
+
+  def _AdvanceTo(self, line):
+    """Advance the reader to the given line.
+
+    Args:
+      line: A line number to advance to.
+    """
+    while self.line_number < line:
+      self.reader.next()
+      self.line_number += 1
+      self.row_count += 1
+      self.sent_count += 1
+
+  def _ReadRows(self, key_start, key_end):
+    """Attempts to read and encode rows [key_start, key_end].
+
+    The encoded rows are stored in self.read_rows.
+
+    Args:
+      key_start: The starting line number.
+      key_end: The ending line number.
+
+    Raises:
+      StopIteration: if the reader runs out of rows.
+      ResumeError: if the rows have an inconsistent number of columns.
+    """
+    assert self.line_number == key_start
+    self.read_rows = []
+    while self.line_number <= key_end:
+      row = self.reader.next()
+      self.row_count += 1
+      if self.column_count is None:
+        self.column_count = len(row)
+      else:
+        if self.column_count != len(row):
+          raise ResumeError('Column count mismatch, %d: %s' %
+                            (self.column_count, str(row)))
+      self.read_rows.append((self.line_number, row))
+      self.line_number += 1
+
+  def _MakeItem(self, key_start, key_end, rows, progress_key=None):
+    """Makes a WorkItem containing the given rows, with the given keys.
+
+    Args:
+      key_start: The start key for the WorkItem.
+      key_end: The end key for the WorkItem.
+      rows: A list of the rows for the WorkItem.
+      progress_key: The progress key for the WorkItem
+
+    Returns:
+      A WorkItem instance for the given batch.
+    """
+    assert rows
+
+    item = WorkItem(self.progress_queue, rows,
+                    key_start, key_end,
+                    progress_key=progress_key)
+
+    return item
+
+  def Batches(self):
+    """Reads the CSV data file and generates WorkItems.
+
+    Yields:
+      Instances of class WorkItem
+
+    Raises:
+      ResumeError: If the progress database and data file indicate a different
+        number of rows.
+    """
+    csv_file = self.openfile(self.csv_filename, 'r')
+    csv_content = csv_file.read()
+    if csv_content:
+      has_headers = csv.Sniffer().has_header(csv_content)
+    else:
+      has_headers = False
+    csv_file.seek(0)
+    self.reader = self.create_csv_reader(csv_file, skipinitialspace=True)
+    if has_headers:
+      logging.info('The CSV file appears to have a header line, skipping.')
+      self.reader.next()
+
+    exhausted = False
+
+    self.line_number = 1
+    self.column_count = None
+
+    logging.info('Starting import; maximum %d entities per post',
+                 self.batch_size)
+
+    state = None
+    if self.progress_generator is not None:
+      for progress_key, state, key_start, key_end in self.progress_generator:
+        if key_start:
+          try:
+            self._AdvanceTo(key_start)
+            self._ReadRows(key_start, key_end)
+            yield self._MakeItem(key_start,
+                                 key_end,
+                                 self.read_rows,
+                                 progress_key=progress_key)
+          except StopIteration:
+            logging.error('Mismatch between data file and progress database')
+            raise ResumeError(
+                'Mismatch between data file and progress database')
+        elif state == DATA_CONSUMED_TO_HERE:
+          try:
+            self._AdvanceTo(key_end + 1)
+          except StopIteration:
+            state = None
+
+    if self.progress_generator is None or state == DATA_CONSUMED_TO_HERE:
+      while not exhausted:
+        key_start = self.line_number
+        key_end = self.line_number + self.batch_size - 1
+        try:
+          self._ReadRows(key_start, key_end)
+        except StopIteration:
+          exhausted = True
+          key_end = self.line_number - 1
+        if key_start <= key_end:
+          yield self._MakeItem(key_start, key_end, self.read_rows)
+
+
+class ReQueue(object):
+  """A special thread-safe queue.
+
+  A ReQueue allows unfinished work items to be returned with a call to
+  reput().  When an item is reput, task_done() should *not* be called
+  in addition; getting an item that has been reput does not increase
+  the number of outstanding tasks.
+
+  This class shares an interface with Queue.Queue and provides the
+  additional Reput method.
+  """
+
+  def __init__(self,
+               queue_capacity,
+               requeue_capacity=None,
+               queue_factory=Queue.Queue,
+               get_time=time.time):
+    """Initialize a ReQueue instance.
+
+    Args:
+      queue_capacity: The number of items that can be put in the ReQueue.
+      requeue_capacity: The number of items that can be reput in the ReQueue.
+      queue_factory: Used for dependency injection.
+      get_time: Used for dependency injection.
+    """
+    if requeue_capacity is None:
+      requeue_capacity = queue_capacity
+
+    self.get_time = get_time
+    self.queue = queue_factory(queue_capacity)
+    self.requeue = queue_factory(requeue_capacity)
+    self.lock = threading.Lock()
+    self.put_cond = threading.Condition(self.lock)
+    self.get_cond = threading.Condition(self.lock)
+
+  def _DoWithTimeout(self,
+                     action,
+                     exc,
+                     wait_cond,
+                     done_cond,
+                     lock,
+                     timeout=None,
+                     block=True):
+    """Performs the given action with a timeout.
+
+    The action must be non-blocking, and raise an instance of exc on a
+    recoverable failure.  If the action fails with an instance of exc,
+    we wait on wait_cond before trying again.  Failure after the
+    timeout is reached is propagated as an exception.  Success is
+    signalled by notifying on done_cond and returning the result of
+    the action.  If action raises any exception besides an instance of
+    exc, it is immediately propagated.
+
+    Args:
+      action: A callable that performs a non-blocking action.
+      exc: An exception type that is thrown by the action to indicate
+        a recoverable error.
+      wait_cond: A condition variable which should be waited on when
+        action throws exc.
+      done_cond: A condition variable to signal if the action returns.
+      lock: The lock used by wait_cond and done_cond.
+      timeout: A non-negative float indicating the maximum time to wait.
+      block: Whether to block if the action cannot complete immediately.
+
+    Returns:
+      The result of the action, if it is successful.
+
+    Raises:
+      ValueError: If the timeout argument is negative.
+    """
+    if timeout is not None and timeout < 0.0:
+      raise ValueError('\'timeout\' must not be a negative number')
+    if not block:
+      timeout = 0.0
+    result = None
+    success = False
+    start_time = self.get_time()
+    lock.acquire()
+    try:
+      while not success:
+        try:
+          result = action()
+          success = True
+        except Exception, e:
+          if not isinstance(e, exc):
+            raise e
+          if timeout is not None:
+            elapsed_time = self.get_time() - start_time
+            timeout -= elapsed_time
+            if timeout <= 0.0:
+              raise e
+          wait_cond.wait(timeout)
+    finally:
+      if success:
+        done_cond.notify()
+      lock.release()
+    return result
+
+  def put(self, item, block=True, timeout=None):
+    """Put an item into the requeue.
+
+    Args:
+      item: An item to add to the requeue.
+      block: Whether to block if the requeue is full.
+      timeout: Maximum time to wait until the queue is non-full.
+
+    Raises:
+      Queue.Full if the queue is full and the timeout expires.
+    """
+    def PutAction():
+      self.queue.put(item, block=False)
+    self._DoWithTimeout(PutAction,
+                        Queue.Full,
+                        self.get_cond,
+                        self.put_cond,
+                        self.lock,
+                        timeout=timeout,
+                        block=block)
+
+  def reput(self, item, block=True, timeout=None):
+    """Re-put an item back into the requeue.
+
+    Re-putting an item does not increase the number of outstanding
+    tasks, so the reput item should be uniquely associated with an
+    item that was previously removed from the requeue and for which
+    task_done has not been called.
+
+    Args:
+      item: An item to add to the requeue.
+      block: Whether to block if the requeue is full.
+      timeout: Maximum time to wait until the queue is non-full.
+
+    Raises:
+      Queue.Full if the queue is full and the timeout expires.
+    """
+    def ReputAction():
+      self.requeue.put(item, block=False)
+    self._DoWithTimeout(ReputAction,
+                        Queue.Full,
+                        self.get_cond,
+                        self.put_cond,
+                        self.lock,
+                        timeout=timeout,
+                        block=block)
+
+  def get(self, block=True, timeout=None):
+    """Get an item from the requeue.
+
+    Args:
+      block: Whether to block if the requeue is empty.
+      timeout: Maximum time to wait until the requeue is non-empty.
+
+    Returns:
+      An item from the requeue.
+
+    Raises:
+      Queue.Empty if the queue is empty and the timeout expires.
+    """
+    def GetAction():
+      try:
+        result = self.requeue.get(block=False)
+        self.requeue.task_done()
+      except Queue.Empty:
+        result = self.queue.get(block=False)
+      return result
+    return self._DoWithTimeout(GetAction,
+                               Queue.Empty,
+                               self.put_cond,
+                               self.get_cond,
+                               self.lock,
+                               timeout=timeout,
+                               block=block)
+
+  def join(self):
+    """Blocks until all of the items in the requeue have been processed."""
+    self.queue.join()
+
+  def task_done(self):
+    """Indicate that a previously enqueued item has been fully processed."""
+    self.queue.task_done()
+
+  def empty(self):
+    """Returns true if the requeue is empty."""
+    return self.queue.empty() and self.requeue.empty()
+
+  def get_nowait(self):
+    """Try to get an item from the queue without blocking."""
+    return self.get(block=False)
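+
+
+# A short usage sketch (names are illustrative; TrySend stands in for real
+# send logic): a worker that fails to send an item hands it back with
+# reput() and does not owe an extra task_done() for it.
+#
+#   q = ReQueue(queue_capacity=8)
+#   q.put(batch)
+#   work = q.get()
+#   if TrySend(work):
+#     q.task_done()
+#   else:
+#     q.reput(work)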
+
+
+class ThrottleHandler(urllib2.BaseHandler):
+  """A urllib2 handler for http and https requests that adds to a throttle."""
+
+  def __init__(self, throttle):
+    """Initialize a ThrottleHandler.
+
+    Args:
+      throttle: A Throttle instance to call for bandwidth and http/https request
+        throttling.
+    """
+    self.throttle = throttle
+
+  def AddRequest(self, throttle_name, req):
+    """Add to bandwidth throttle for given request.
+
+    Args:
+      throttle_name: The name of the bandwidth throttle to add to.
+      req: The request whose size will be added to the throttle.
+    """
+    size = 0
+    for key, value in req.headers.iteritems():
+      size += len('%s: %s\n' % (key, value))
+    for key, value in req.unredirected_hdrs.iteritems():
+      size += len('%s: %s\n' % (key, value))
+    (unused_scheme,
+     unused_host_port, url_path,
+     unused_query, unused_fragment) = urlparse.urlsplit(req.get_full_url())
+    size += len('%s %s HTTP/1.1\n' % (req.get_method(), url_path))
+    data = req.get_data()
+    if data:
+      size += len(data)
+    self.throttle.AddTransfer(throttle_name, size)
+
+  def AddResponse(self, throttle_name, res):
+    """Add to bandwidth throttle for given response.
+
+    Args:
+      throttle_name: The name of the bandwidth throttle to add to.
+      res: The response whose size will be added to the throttle.
+    """
+    content = res.read()
+    def ReturnContent():
+      return content
+    res.read = ReturnContent
+    size = len(content)
+    headers = res.info()
+    for key, value in headers.items():
+      size += len('%s: %s\n' % (key, value))
+    self.throttle.AddTransfer(throttle_name, size)
+
+  def http_request(self, req):
+    """Process an HTTP request.
+
+    If the throttle is over quota, sleep first.  Then add request size to
+    throttle before returning it to be sent.
+
+    Args:
+      req: A urllib2.Request object.
+
+    Returns:
+      The request passed in.
+    """
+    self.throttle.Sleep()
+    self.AddRequest(BANDWIDTH_UP, req)
+    return req
+
+  def https_request(self, req):
+    """Process an HTTPS request.
+
+    If the throttle is over quota, sleep first.  Then add request size to
+    throttle before returning it to be sent.
+
+    Args:
+      req: A urllib2.Request object.
+
+    Returns:
+      The request passed in.
+    """
+    self.throttle.Sleep()
+    self.AddRequest(HTTPS_BANDWIDTH_UP, req)
+    return req
+
+  def http_response(self, unused_req, res):
+    """Process an HTTP response.
+
+    The size of the response is added to the bandwidth throttle and the request
+    throttle is incremented by one.
+
+    Args:
+      unused_req: The urllib2 request for this response.
+      res: A urllib2 response object.
+
+    Returns:
+      The response passed in.
+    """
+    self.AddResponse(BANDWIDTH_DOWN, res)
+    self.throttle.AddTransfer(REQUESTS, 1)
+    return res
+
+  def https_response(self, unused_req, res):
+    """Process an HTTPS response.
+
+    The size of the response is added to the bandwidth throttle and the request
+    throttle is incremented by one.
+
+    Args:
+      unused_req: The urllib2 request for this response.
+      res: A urllib2 response object.
+
+    Returns:
+      The response passed in.
+    """
+    self.AddResponse(HTTPS_BANDWIDTH_DOWN, res)
+    self.throttle.AddTransfer(HTTPS_REQUESTS, 1)
+    return res
+
+
+class ThrottledHttpRpcServer(appengine_rpc.HttpRpcServer):
+  """Provides a simplified RPC-style interface for HTTP requests.
+
+  This RPC server uses a Throttle to prevent exceeding quotas.
+  """
+
+  def __init__(self, throttle, request_manager, *args, **kwargs):
+    """Initialize a ThrottledHttpRpcServer.
+
+    Also sets request_manager.rpc_server to the ThrottledHttpRpcServer instance.
+
+    Args:
+      throttle: A Throttle instance.
+      request_manager: A RequestManager instance.
+      args: Positional arguments to pass through to
+        appengine_rpc.HttpRpcServer.__init__
+      kwargs: Keyword arguments to pass through to
+        appengine_rpc.HttpRpcServer.__init__
+    """
+    self.throttle = throttle
+    appengine_rpc.HttpRpcServer.__init__(self, *args, **kwargs)
+    request_manager.rpc_server = self
+
+  def _GetOpener(self):
+    """Returns an OpenerDirector that supports cookies and ignores redirects.
+
+    Returns:
+      A urllib2.OpenerDirector object.
+    """
+    opener = appengine_rpc.HttpRpcServer._GetOpener(self)
+    opener.add_handler(ThrottleHandler(self.throttle))
+
+    return opener
+
+
+def ThrottledHttpRpcServerFactory(throttle, request_manager):
+  """Create a factory to produce ThrottledHttpRpcServer for a given throttle.
+
+  Args:
+    throttle: A Throttle instance to use for the ThrottledHttpRpcServer.
+    request_manager: A RequestManager instance.
+
+  Returns:
+    A factory to produce a ThrottledHttpRpcServer.
+  """
+  def MakeRpcServer(*args, **kwargs):
+    kwargs['account_type'] = 'HOSTED_OR_GOOGLE'
+    kwargs['save_cookies'] = True
+    return ThrottledHttpRpcServer(throttle, request_manager, *args, **kwargs)
+  return MakeRpcServer
+
+
+class RequestManager(object):
+  """A class which wraps a connection to the server."""
+
+  source = 'google-bulkloader-%s' % UPLOADER_VERSION
+  user_agent = source
+
+  def __init__(self,
+               app_id,
+               host_port,
+               url_path,
+               kind,
+               throttle):
+    """Initialize a RequestManager object.
+
+    Args:
+      app_id: String containing the application id for requests.
+      host_port: String containing the "host:port" pair; the port is optional.
+      url_path: partial URL (path) to post entity data to.
+      kind: Kind of the Entity records being posted.
+      throttle: A Throttle instance.
+    """
+    self.app_id = app_id
+    self.host_port = host_port
+    self.host = host_port.split(':')[0]
+    if url_path and url_path[0] != '/':
+      url_path = '/' + url_path
+    self.url_path = url_path
+    self.kind = kind
+    self.throttle = throttle
+    self.credentials = None
+    throttled_rpc_server_factory = ThrottledHttpRpcServerFactory(
+        self.throttle, self)
+    logging.debug('Configuring remote_api. app_id = %s, url_path = %s, '
+                  'servername = %s' % (app_id, url_path, host_port))
+    remote_api_stub.ConfigureRemoteDatastore(
+        app_id,
+        url_path,
+        self.AuthFunction,
+        servername=host_port,
+        rpc_server_factory=throttled_rpc_server_factory)
+    self.authenticated = False
+
+  def Authenticate(self):
+    """Invoke authentication if necessary."""
+    self.rpc_server.Send(self.url_path, payload=None)
+    self.authenticated = True
+
+  def AuthFunction(self,
+                   raw_input_fn=raw_input,
+                   password_input_fn=getpass.getpass):
+    """Prompts the user for a username and password.
+
+    Caches the results the first time it is called and returns the
+    same result every subsequent time.
+
+    Args:
+      raw_input_fn: Used for dependency injection.
+      password_input_fn: Used for dependency injection.
+
+    Returns:
+      A pair of the username and password.
+    """
+    if self.credentials is not None:
+      return self.credentials
+    print 'Please enter login credentials for %s (%s)' % (
+        self.host, self.app_id)
+    email = raw_input_fn('Email: ')
+    if email:
+      password_prompt = 'Password for %s: ' % email
+      password = password_input_fn(password_prompt)
+    else:
+      password = None
+    self.credentials = (email, password)
+    return self.credentials
+
+  def _GetHeaders(self):
+    """Constructs a dictionary of extra headers to send with a request."""
+    headers = {
+        'GAE-Uploader-Version': UPLOADER_VERSION,
+        'GAE-Uploader-Kind': self.kind
+        }
+    return headers
+
+  def EncodeContent(self, rows):
+    """Encodes row data to the wire format.
+
+    Args:
+      rows: A list of pairs of a line number and a list of column values.
+
+    Returns:
+      A list of db.Model instances.
+    """
+    try:
+      loader = Loader.RegisteredLoaders()[self.kind]
+    except KeyError:
+      logging.error('No Loader defined for kind %s.' % self.kind)
+      raise ConfigurationError('No Loader defined for kind %s.' % self.kind)
+    entities = []
+    for line_number, values in rows:
+      key = loader.GenerateKey(line_number, values)
+      entity = loader.CreateEntity(values, key_name=key)
+      entities.extend(entity)
+
+    return entities
+
+  def PostEntities(self, item):
+    """Posts Entity records to a remote endpoint over HTTP.
+
+    Args:
+      item: A workitem containing the entities to post.
+
+    Returns:
+      None; the entities are written through the registered remote_api
+        datastore stub via db.put().
+    """
+    entities = item.content
+    db.put(entities)
+
+
+class WorkItem(object):
+  """Holds a unit of uploading work.
+
+  A WorkItem represents a number of entities that need to be uploaded to
+  Google App Engine. These entities are encoded in the "content" field of
+  the WorkItem, and will be POST'd as-is to the server.
+
+  The entities are identified by a range of numeric keys, inclusively. In
+  the case of a resumption of an upload, or a replay to correct errors,
+  these keys must be able to identify the same set of entities.
+
+  Note that keys specify a range. The entities do not have to sequentially
+  fill the entire range; they must simply bound a range of valid keys.
+  """
+
+  def __init__(self, progress_queue, rows, key_start, key_end,
+               progress_key=None):
+    """Initialize the WorkItem instance.
+
+    Args:
+      progress_queue: A queue used for tracking progress information.
+      rows: A list of pairs of a line number and a list of column values
+      key_start: The (numeric) starting key, inclusive.
+      key_end: The (numeric) ending key, inclusive.
+      progress_key: If this WorkItem represents state from a prior run,
+        then this will be the key within the progress database.
+    """
+    self.state = STATE_READ
+
+    self.progress_queue = progress_queue
+
+    assert isinstance(key_start, (int, long))
+    assert isinstance(key_end, (int, long))
+    assert key_start <= key_end
+
+    self.key_start = key_start
+    self.key_end = key_end
+    self.progress_key = progress_key
+
+    self.progress_event = threading.Event()
+
+    self.rows = rows
+    self.content = None
+    self.count = len(rows)
+
+  def MarkAsRead(self):
+    """Mark this WorkItem as read/consumed from the data source."""
+
+    assert self.state == STATE_READ
+
+    self._StateTransition(STATE_READ, blocking=True)
+
+    assert self.progress_key is not None
+
+  def MarkAsSending(self):
+    """Mark this WorkItem as in-process on being uploaded to the server."""
+
+    assert self.state == STATE_READ or self.state == STATE_NOT_SENT
+    assert self.progress_key is not None
+
+    self._StateTransition(STATE_SENDING, blocking=True)
+
+  def MarkAsSent(self):
+    """Mark this WorkItem as sucessfully-sent to the server."""
+
+    assert self.state == STATE_SENDING
+    assert self.progress_key is not None
+
+    self._StateTransition(STATE_SENT, blocking=False)
+
+  def MarkAsError(self):
+    """Mark this WorkItem as required manual error recovery."""
+
+    assert self.state == STATE_SENDING
+    assert self.progress_key is not None
+
+    self._StateTransition(STATE_NOT_SENT, blocking=True)
+
+  def _StateTransition(self, new_state, blocking=False):
+    """Transition the work item to a new state, storing progress information.
+
+    Args:
+      new_state: The state to transition to.
+      blocking: Whether to block for the progress thread to acknowledge the
+        transition.
+    """
+    logging.debug('[%s-%s] %s' %
+                  (self.key_start, self.key_end, StateMessage(self.state)))
+    assert not self.progress_event.isSet()
+
+    self.state = new_state
+
+    self.progress_queue.put(self)
+
+    if blocking:
+      self.progress_event.wait()
+
+      self.progress_event.clear()
+
+
+
+def InterruptibleSleep(sleep_time):
+  """Puts thread to sleep, checking this threads exit_flag twice a second.
+
+  Args:
+    sleep_time: Time to sleep.
+  """
+  slept = 0.0
+  epsilon = .0001
+  thread = threading.currentThread()
+  while slept < sleep_time - epsilon:
+    remaining = sleep_time - slept
+    this_sleep_time = min(remaining, 0.5)
+    time.sleep(this_sleep_time)
+    slept += this_sleep_time
+    if thread.exit_flag:
+      return
+
+
+class ThreadGate(object):
+  """Manage the number of active worker threads.
+
+  The ThreadGate limits the number of threads that are simultaneously
+  uploading batches of records in order to implement adaptive rate
+  control.  The number of simultaneous upload threads that it takes to
+  start causing timeout varies widely over the course of the day, so
+  adaptive rate control allows the uploader to do many uploads while
+  reducing the error rate and thus increasing the throughput.
+
+  Initially the ThreadGate allows only one uploader thread to be active.
+  For each successful upload, another thread is activated and for each
+  failed upload, the number of active threads is reduced by one.
+  """
+
+  def __init__(self, enabled, sleep=InterruptibleSleep):
+    self.enabled = enabled
+    self.enabled_count = 1
+    self.lock = threading.Lock()
+    self.thread_semaphore = threading.Semaphore(self.enabled_count)
+    self._threads = []
+    self.backoff_time = 0
+    self.sleep = sleep
+
+  def Register(self, thread):
+    """Register a thread with the thread gate."""
+    self._threads.append(thread)
+
+  def Threads(self):
+    """Yields the registered threads."""
+    for thread in self._threads:
+      yield thread
+
+  def EnableThread(self):
+    """Enable one more worker thread."""
+    self.lock.acquire()
+    try:
+      self.enabled_count += 1
+    finally:
+      self.lock.release()
+    self.thread_semaphore.release()
+
+  def EnableAllThreads(self):
+    """Enable all worker threads."""
+    for unused_idx in range(len(self._threads) - self.enabled_count):
+      self.EnableThread()
+
+  def StartWork(self):
+    """Starts a critical section in which the number of workers is limited.
+
+    If thread throttling is enabled then this method starts a critical
+    section which allows self.enabled_count simultaneously operating
+    threads. The critical section is ended by calling self.FinishWork().
+    """
+    if self.enabled:
+      self.thread_semaphore.acquire()
+      if self.backoff_time > 0.0:
+        if not threading.currentThread().exit_flag:
+          logging.info('Backing off: %.1f seconds',
+                       self.backoff_time)
+        self.sleep(self.backoff_time)
+
+  def FinishWork(self):
+    """Ends a critical section started with self.StartWork()."""
+    if self.enabled:
+      self.thread_semaphore.release()
+
+  def IncreaseWorkers(self):
+    """Informs the throttler that an item was successfully sent.
+
+    If thread throttling is enabled, this method will cause an
+    additional thread to run in the critical section.
+    """
+    if self.enabled:
+      if self.backoff_time > 0.0:
+        logging.info('Resetting backoff to 0.0')
+        self.backoff_time = 0.0
+      do_enable = False
+      self.lock.acquire()
+      try:
+        if self.enabled and len(self._threads) > self.enabled_count:
+          do_enable = True
+          self.enabled_count += 1
+      finally:
+        self.lock.release()
+      if do_enable:
+        self.thread_semaphore.release()
+
+  def DecreaseWorkers(self):
+    """Informs the thread_gate that an item failed to send.
+
+    If thread throttling is enabled, this method will cause the
+    throttler to allow one fewer thread in the critical section. If
+    there is only one thread remaining, failures will result in
+    exponential backoff until there is a success.
+    """
+    if self.enabled:
+      do_disable = False
+      self.lock.acquire()
+      try:
+        if self.enabled:
+          if self.enabled_count > 1:
+            do_disable = True
+            self.enabled_count -= 1
+          else:
+            if self.backoff_time == 0.0:
+              self.backoff_time = INITIAL_BACKOFF
+            else:
+              self.backoff_time *= BACKOFF_FACTOR
+      finally:
+        self.lock.release()
+      if do_disable:
+        self.thread_semaphore.acquire()
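+
+# Sketch of the calling pattern the gate expects from an upload thread
+# (SendBatch stands in for the real per-batch upload logic):
+#
+#   gate.StartWork()
+#   try:
+#     ok = SendBatch(item)
+#   finally:
+#     gate.FinishWork()
+#   if ok:
+#     gate.IncreaseWorkers()
+#   else:
+#     gate.DecreaseWorkers()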
+
+
+class Throttle(object):
+  """A base class for upload rate throttling.
+
+  Transferring a large number of records too quickly to an application
+  could trigger quota limits and cause the transfer process to halt.
+  In order to stay within the application's quota, we throttle the
+  data transfer to a specified limit (across all transfer threads).
+  This limit defaults to about half of the Google App Engine default
+  for an application, but can be manually adjusted faster/slower as
+  appropriate.
+
+  This class tracks a moving average of some aspect of the transfer
+  rate (bandwidth, records per second, http connections per
+  second). It keeps two windows of counts of bytes transferred, on a
+  per-thread basis. One block is the "current" block, and the other is
+  the "prior" block. It will rotate the counts from current to prior
+  when ROTATE_PERIOD has passed.  Thus, the current block will
+  represent from 0 seconds to ROTATE_PERIOD seconds of activity
+  (determined by: time.time() - self.last_rotate).  The prior block
+  will always represent a full ROTATE_PERIOD.
+
+  Sleeping is performed just before a transfer of another block, and is
+  based on the counts transferred *before* the next transfer. It really
+  does not matter how much will be transferred, but only that for all the
+  data transferred SO FAR that we have interspersed enough pauses to
+  ensure the aggregate transfer rate is within the specified limit.
+
+  These counts are maintained on a per-thread basis, so we do not require
+  any interlocks around incrementing the counts. There IS an interlock on
+  the rotation of the counts because we do not want multiple threads to
+  multiply-rotate the counts.
+
+  There are various race conditions in the computation and collection
+  of these counts. We do not require precise values, but simply to
+  keep the overall transfer within the bandwidth limits. If a given
+  pause is a little short, or a little long, then the aggregate delays
+  will be correct.
+  """
+
+  ROTATE_PERIOD = 600
+
+  def __init__(self,
+               get_time=time.time,
+               thread_sleep=InterruptibleSleep,
+               layout=None):
+    self.get_time = get_time
+    self.thread_sleep = thread_sleep
+
+    self.start_time = get_time()
+    self.transferred = {}
+    self.prior_block = {}
+    self.totals = {}
+    self.throttles = {}
+
+    self.last_rotate = {}
+    self.rotate_mutex = {}
+    if layout:
+      self.AddThrottles(layout)
+
+  def AddThrottle(self, name, limit):
+    self.throttles[name] = limit
+    self.transferred[name] = {}
+    self.prior_block[name] = {}
+    self.totals[name] = {}
+    self.last_rotate[name] = self.get_time()
+    self.rotate_mutex[name] = threading.Lock()
+
+  def AddThrottles(self, layout):
+    for key, value in layout.iteritems():
+      self.AddThrottle(key, value)
+
+  def Register(self, thread):
+    """Register this thread with the throttler."""
+    thread_name = thread.getName()
+    for throttle_name in self.throttles.iterkeys():
+      self.transferred[throttle_name][thread_name] = 0
+      self.prior_block[throttle_name][thread_name] = 0
+      self.totals[throttle_name][thread_name] = 0
+
+  def VerifyName(self, throttle_name):
+    if throttle_name not in self.throttles:
+      raise AssertionError('%s is not a registered throttle' % throttle_name)
+
+  def AddTransfer(self, throttle_name, token_count):
+    """Add a count to the amount this thread has transferred.
+
+    Each time a thread transfers some data, it should call this method to
+    note the amount sent. The counts may be rotated if sufficient time
+    has passed since the last rotation.
+
+    Note: this method should only be called by the BulkLoaderThread
+    instances. The token count is allocated towards the
+    "current thread".
+
+    Args:
+      throttle_name: The name of the throttle to add to.
+      token_count: The number to add to the throttle counter.
+    """
+    self.VerifyName(throttle_name)
+    transferred = self.transferred[throttle_name]
+    transferred[threading.currentThread().getName()] += token_count
+
+    if self.last_rotate[throttle_name] + self.ROTATE_PERIOD < self.get_time():
+      self._RotateCounts(throttle_name)
+
+  def Sleep(self, throttle_name=None):
+    """Possibly sleep in order to limit the transfer rate.
+
+    Note that we sleep based on *prior* transfers rather than what we
+    may be about to transfer. The next transfer could put us under/over
+    and that will be rectified *after* that transfer. Net result is that
+    the average transfer rate will remain within bounds. Spiky behavior
+    or uneven rates among the threads could possibly bring the transfer
+    rate above the requested limit for short durations.
+
+    Args:
+      throttle_name: The name of the throttle to sleep on.  If None or
+        omitted, then sleep on all throttles.
+    """
+    if throttle_name is None:
+      for throttle_name in self.throttles:
+        self.Sleep(throttle_name=throttle_name)
+      return
+
+    self.VerifyName(throttle_name)
+
+    thread = threading.currentThread()
+
+    while True:
+      duration = self.get_time() - self.last_rotate[throttle_name]
+
+      total = 0
+      for count in self.prior_block[throttle_name].values():
+        total += count
+
+      if total:
+        duration += self.ROTATE_PERIOD
+
+      for count in self.transferred[throttle_name].values():
+        total += count
+
+      sleep_time = (float(total) / self.throttles[throttle_name]) - duration
+
+      if sleep_time < MINIMUM_THROTTLE_SLEEP_DURATION:
+        break
+
+      logging.debug('[%s] Throttling on %s. Sleeping for %.1f ms '
+                    '(duration=%.1f ms, total=%d)',
+                    thread.getName(), throttle_name,
+                    sleep_time * 1000, duration * 1000, total)
+      self.thread_sleep(sleep_time)
+      if thread.exit_flag:
+        break
+      self._RotateCounts(throttle_name)
+
+  def _RotateCounts(self, throttle_name):
+    """Rotate the transfer counters.
+
+    If sufficient time has passed, then rotate the counters from active to
+    the prior-block of counts.
+
+    This rotation is interlocked to ensure that multiple threads do not
+    over-rotate the counts.
+
+    Args:
+      throttle_name: The name of the throttle to rotate.
+    """
+    self.VerifyName(throttle_name)
+    self.rotate_mutex[throttle_name].acquire()
+    try:
+      next_rotate_time = self.last_rotate[throttle_name] + self.ROTATE_PERIOD
+      if next_rotate_time >= self.get_time():
+        return
+
+      for name, count in self.transferred[throttle_name].items():
+        self.prior_block[throttle_name][name] = count
+        self.transferred[throttle_name][name] = 0
+
+        self.totals[throttle_name][name] += count
+
+      self.last_rotate[throttle_name] = self.get_time()
+
+    finally:
+      self.rotate_mutex[throttle_name].release()
+
+  def TotalTransferred(self, throttle_name):
+    """Return the total transferred, and over what period.
+
+    Args:
+      throttle_name: The name of the throttle to total.
+
+    Returns:
+      A tuple of the total count and running time for the given throttle name.
+    """
+    total = 0
+    for count in self.totals[throttle_name].values():
+      total += count
+    for count in self.transferred[throttle_name].values():
+      total += count
+    return total, self.get_time() - self.start_time
+
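+# Sketch of the per-thread protocol a worker follows (illustrative only;
+# send_bytes and payload are hypothetical):
+#
+#   throttle = Throttle(layout={BANDWIDTH_UP: 250000})
+#   throttle.Register(threading.currentThread())
+#   ...
+#   throttle.Sleep(BANDWIDTH_UP)                  # pause if over the limit
+#   send_bytes(payload)
+#   throttle.AddTransfer(BANDWIDTH_UP, len(payload))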
+
+class _ThreadBase(threading.Thread):
+  """Provide some basic features for the threads used in the uploader.
+
+  This abstract base class is used to provide some common features:
+
+  * Flag to ask thread to exit as soon as possible.
+  * Record exit/error status for the primary thread to pick up.
+  * Capture exceptions and record them for pickup.
+  * Some basic logging of thread start/stop.
+  * All threads are "daemon" threads.
+  * Friendly names for presenting to users.
+
+  Concrete sub-classes must implement PerformWork().
+
+  Either self.NAME should be set or GetFriendlyName() be overridden to
+  return a human-friendly name for this thread.
+
+  The run() method starts the thread and prints start/exit messages.
+
+  self.exit_flag is intended to signal that this thread should exit
+  when it gets the chance.  PerformWork() should check self.exit_flag
+  whenever it has the opportunity to exit gracefully.
+  """
+
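+  # Minimal sketch of a concrete subclass (illustrative only; the name and
+  # the work performed are hypothetical):
+  #
+  #   class HeartbeatThread(_ThreadBase):
+  #     NAME = 'heartbeat thread'
+  #
+  #     def PerformWork(self):
+  #       while not self.exit_flag:
+  #         logging.debug('[%s] still running', self.getName())
+  #         time.sleep(1.0)
+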
+  def __init__(self):
+    threading.Thread.__init__(self)
+
+    self.setDaemon(True)
+
+    self.exit_flag = False
+    self.error = None
+
+  def run(self):
+    """Perform the work of the thread."""
+    logging.info('[%s] %s: started', self.getName(), self.__class__.__name__)
+
+    try:
+      self.PerformWork()
+    except:
+      self.error = sys.exc_info()[1]
+      logging.exception('[%s] %s:', self.getName(), self.__class__.__name__)
+
+    logging.info('[%s] %s: exiting', self.getName(), self.__class__.__name__)
+
+  def PerformWork(self):
+    """Perform the thread-specific work."""
+    raise NotImplementedError()
+
+  def CheckError(self):
+    """If an error is present, then log it."""
+    if self.error:
+      logging.error('Error in %s: %s', self.GetFriendlyName(), self.error)
+
+  def GetFriendlyName(self):
+    """Returns a human-friendly description of the thread."""
+    if hasattr(self, 'NAME'):
+      return self.NAME
+    return 'unknown thread'
+
+
+class BulkLoaderThread(_ThreadBase):
+  """A thread which transmits entities to the server application.
+
+  This thread will read WorkItem instances from the work_queue and upload
+  the entities to the server application. Progress information will be
+  pushed into the progress_queue as the work is being performed.
+
+  If a BulkLoaderThread encounters a transient error, the entities will be
+  resent; if a fatal error is encountered, the BulkLoaderThread exits.
+  """
+
+  def __init__(self,
+               work_queue,
+               throttle,
+               thread_gate,
+               request_manager):
+    """Initialize the BulkLoaderThread instance.
+
+    Args:
+      work_queue: A queue containing WorkItems for processing.
+      throttle: A Throttle instance used to control upload bandwidth.
+      thread_gate: A ThreadGate to control number of simultaneous uploads.
+      request_manager: A RequestManager instance.
+    """
+    _ThreadBase.__init__(self)
+
+    self.work_queue = work_queue
+    self.throttle = throttle
+    self.thread_gate = thread_gate
+
+    self.request_manager = request_manager
+
+  def PerformWork(self):
+    """Perform the work of a BulkLoaderThread."""
+    while not self.exit_flag:
+      success = False
+      self.thread_gate.StartWork()
+      try:
+        try:
+          item = self.work_queue.get(block=True, timeout=1.0)
+        except Queue.Empty:
+          continue
+        if item == _THREAD_SHOULD_EXIT:
+          break
+
+        logging.debug('[%s] Got work item [%d-%d]',
+                      self.getName(), item.key_start, item.key_end)
+
+        try:
+
+          item.MarkAsSending()
+          try:
+            if item.content is None:
+              item.content = self.request_manager.EncodeContent(item.rows)
+            try:
+              self.request_manager.PostEntities(item)
+              success = True
+              logging.debug(
+                  '[%d-%d] Sent %d entities',
+                  item.key_start, item.key_end, item.count)
+              self.throttle.AddTransfer(RECORDS, item.count)
+            except (db.InternalError, db.NotSavedError, db.Timeout), e:
+              logging.debug('Caught non-fatal error: %s', e)
+            except urllib2.HTTPError, e:
+              if e.code == 403 or (e.code >= 500 and e.code < 600):
+                logging.debug('Caught HTTP error %d', e.code)
+                logging.debug('%s', e.read())
+              else:
+                raise e
+
+          except:
+            self.error = sys.exc_info()[1]
+            logging.exception('[%s] %s: caught exception %s', self.getName(),
+                              self.__class__.__name__, str(sys.exc_info()))
+            raise
+
+        finally:
+          if success:
+            item.MarkAsSent()
+            self.thread_gate.IncreaseWorkers()
+            self.work_queue.task_done()
+          else:
+            item.MarkAsError()
+            self.thread_gate.DecreaseWorkers()
+            try:
+              self.work_queue.reput(item, block=False)
+            except Queue.Full:
+              logging.error('[%s] Failed to reput work item.', self.getName())
+              raise Error('Failed to reput work item')
+          logging.info('[%d-%d] %s',
+                       item.key_start, item.key_end, StateMessage(item.state))
+
+      finally:
+        self.thread_gate.FinishWork()
+
+
+  def GetFriendlyName(self):
+    """Returns a human-friendly name for this thread."""
+    return 'worker [%s]' % self.getName()
+
+
+class DataSourceThread(_ThreadBase):
+  """A thread which reads WorkItems and pushes them into queue.
+
+  This thread will read/consume WorkItems from a generator (produced by
+  the generator factory). These WorkItems will then be pushed into the
+  work_queue. Note that reading will block if/when the work_queue becomes
+  full. Information on content consumed from the generator will be pushed
+  into the progress_queue.
+  """
+
+  NAME = 'data source thread'
+
+  def __init__(self,
+               work_queue,
+               progress_queue,
+               workitem_generator_factory,
+               progress_generator_factory):
+    """Initialize the DataSourceThread instance.
+
+    Args:
+      work_queue: A queue containing WorkItems for processing.
+      progress_queue: A queue used for tracking progress information.
+      workitem_generator_factory: A factory that creates a WorkItem generator.
+      progress_generator_factory: A factory that creates a generator which
+        produces prior progress status, or None if there is no prior status
+        to use.
+    """
+    _ThreadBase.__init__(self)
+
+    self.work_queue = work_queue
+    self.progress_queue = progress_queue
+    self.workitem_generator_factory = workitem_generator_factory
+    self.progress_generator_factory = progress_generator_factory
+    self.entity_count = 0
+
+  def PerformWork(self):
+    """Performs the work of a DataSourceThread."""
+    if self.progress_generator_factory:
+      progress_gen = self.progress_generator_factory()
+    else:
+      progress_gen = None
+
+    content_gen = self.workitem_generator_factory(self.progress_queue,
+                                                  progress_gen)
+
+    self.sent_count = 0
+    self.read_count = 0
+    self.read_all = False
+
+    for item in content_gen.Batches():
+      item.MarkAsRead()
+
+      while not self.exit_flag:
+        try:
+          self.work_queue.put(item, block=True, timeout=1.0)
+          self.entity_count += item.count
+          break
+        except Queue.Full:
+          pass
+
+      if self.exit_flag:
+        break
+
+    if not self.exit_flag:
+      self.read_all = True
+    self.read_count = content_gen.row_count
+    self.sent_count = content_gen.sent_count
+
+
+
+def _RunningInThread(thread):
+  """Return True if we are running within the specified thread."""
+  return threading.currentThread().getName() == thread.getName()
+
+
+class ProgressDatabase(object):
+  """Persistently record all progress information during an upload.
+
+  This class wraps a very simple SQLite database which records each of
+  the relevant details from the WorkItem instances. If the uploader is
+  resumed, then data is replayed out of the database.
+  """
+
+  def __init__(self, db_filename, commit_periodicity=100):
+    """Initialize the ProgressDatabase instance.
+
+    Args:
+      db_filename: The name of the SQLite database to use.
+      commit_periodicity: How many operations to perform between commits.
+    """
+    self.db_filename = db_filename
+
+    logging.info('Using progress database: %s', db_filename)
+    self.primary_conn = sqlite3.connect(db_filename, isolation_level=None)
+    self.primary_thread = threading.currentThread()
+
+    self.progress_conn = None
+    self.progress_thread = None
+
+    self.operation_count = 0
+    self.commit_periodicity = commit_periodicity
+
+    self.prior_key_end = None
+
+    try:
+      self.primary_conn.execute(
+          """create table progress (
+          id integer primary key autoincrement,
+          state integer not null,
+          key_start integer not null,
+          key_end integer not null
+          )
+          """)
+    except sqlite3.OperationalError, e:
+      if 'already exists' not in e.message:
+        raise
+
+    try:
+      self.primary_conn.execute('create index i_state on progress (state)')
+    except sqlite3.OperationalError, e:
+      if 'already exists' not in e.message:
+        raise
+
+  def ThreadComplete(self):
+    """Finalize any operations the progress thread has performed.
+
+    The database aggregates lots of operations into a single commit, and
+    this method is used to commit any pending operations as the thread
+    is about to shut down.
+    """
+    if self.progress_conn:
+      self._MaybeCommit(force_commit=True)
+
+  def _MaybeCommit(self, force_commit=False):
+    """Periodically commit changes into the SQLite database.
+
+    Committing every operation is quite expensive, and slows down the
+    operation of the script. Thus, we only commit after every N operations,
+    as determined by the self.commit_periodicity value. Optionally, the
+    caller can force a commit.
+
+    Args:
+      force_commit: Pass True in order for a commit to occur regardless
+        of the current operation count.
+    """
+    self.operation_count += 1
+    if force_commit or (self.operation_count % self.commit_periodicity) == 0:
+      self.progress_conn.commit()
+
+  def _OpenProgressConnection(self):
+    """Possibly open a database connection for the progress tracker thread.
+
+    If the connection is not open (for the calling thread, which is assumed
+    to be the progress tracker thread), then open it. We also open a couple
+    of cursors for later use (and reuse).
+    """
+    if self.progress_conn:
+      return
+
+    assert not _RunningInThread(self.primary_thread)
+
+    self.progress_thread = threading.currentThread()
+
+    self.progress_conn = sqlite3.connect(self.db_filename)
+
+    self.insert_cursor = self.progress_conn.cursor()
+    self.update_cursor = self.progress_conn.cursor()
+
+  def HasUnfinishedWork(self):
+    """Returns True if the database has progress information.
+
+    Note there are two basic cases for progress information:
+    1) All saved records indicate a successful upload. In this case, we
+       need to skip everything transmitted so far and then send the rest.
+    2) Some records for incomplete transfer are present. These need to be
+       sent again, and then we resume sending after all the successful
+       data.
+
+    Returns:
+      True if the database has progress information, False otherwise.
+
+    Raises:
+      ResumeError: If there is an error reading the progress database.
+    """
+    assert _RunningInThread(self.primary_thread)
+
+    cursor = self.primary_conn.cursor()
+    cursor.execute('select count(*) from progress')
+    row = cursor.fetchone()
+    if row is None:
+      raise ResumeError('Error reading progress information.')
+
+    return row[0] != 0
+
+  def StoreKeys(self, key_start, key_end):
+    """Record a new progress record, returning a key for later updates.
+
+    The specified progress information will be persisted into the database.
+    A unique key will be returned that identifies this progress state. The
+    key is later used to (quickly) update this record.
+
+    For the progress resumption to proceed properly, calls to StoreKeys
+    MUST specify monotonically increasing key ranges. This will result in
+    a database whereby the ID, KEY_START, and KEY_END columns are all
+    increasing (rather than having ranges out of order).
+
+    NOTE: the above precondition is NOT tested by this method (since it
+    would imply an additional table read or two on each invocation).
+
+    Args:
+      key_start: The starting key of the WorkItem (inclusive).
+      key_end: The ending key of the WorkItem (inclusive).
+
+    Returns:
+      A string to later be used as a unique key to update this state.
+    """
+    self._OpenProgressConnection()
+
+    assert _RunningInThread(self.progress_thread)
+    assert isinstance(key_start, int)
+    assert isinstance(key_end, int)
+    assert key_start <= key_end
+
+    if self.prior_key_end is not None:
+      assert key_start > self.prior_key_end
+    self.prior_key_end = key_end
+
+    self.insert_cursor.execute(
+        'insert into progress (state, key_start, key_end) values (?, ?, ?)',
+        (STATE_READ, key_start, key_end))
+
+    progress_key = self.insert_cursor.lastrowid
+
+    self._MaybeCommit()
+
+    return progress_key
+
+  def UpdateState(self, key, new_state):
+    """Update a specified progress record with new information.
+
+    Args:
+      key: The key for this progress record, returned from StoreKeys
+      new_state: The new state to associate with this progress record.
+    """
+    self._OpenProgressConnection()
+
+    assert _RunningInThread(self.progress_thread)
+    assert isinstance(new_state, int)
+
+    self.update_cursor.execute('update progress set state=? where id=?',
+                               (new_state, key))
+
+    self._MaybeCommit()
+
+  def GetProgressStatusGenerator(self):
+    """Get a generator which returns progress information.
+
+    The returned generator will yield a series of 4-tuples that specify
+    progress information about a prior run of the uploader. The 4-tuples
+    have the following values:
+
+      progress_key: The unique key to later update this record with new
+                    progress information.
+      state: The last state saved for this progress record.
+      key_start: The starting key of the items for uploading (inclusive).
+      key_end: The ending key of the items for uploading (inclusive).
+
+    After all incompletely-transferred records are provided, one
+    more 4-tuple will be generated:
+
+      None
+      DATA_CONSUMED_TO_HERE: A unique string value indicating this record
+                             is being provided.
+      None
+      key_end: An integer value specifying the last data source key that
+               was handled by the previous run of the uploader.
+
+    The caller should begin uploading records which occur after key_end.
+
+    Yields:
+      Progress information as tuples (progress_key, state, key_start, key_end).
+    """
+    conn = sqlite3.connect(self.db_filename, isolation_level=None)
+    cursor = conn.cursor()
+
+    cursor.execute('select max(id) from progress')
+    batch_id = cursor.fetchone()[0]
+
+    cursor.execute('select key_end from progress where id = ?', (batch_id,))
+    key_end = cursor.fetchone()[0]
+
+    self.prior_key_end = key_end
+
+    cursor.execute(
+        'select id, state, key_start, key_end from progress'
+        '  where state != ?'
+        '  order by id',
+        (STATE_SENT,))
+
+    rows = cursor.fetchall()
+
+    for row in rows:
+      if row is None:
+        break
+
+      yield row
+
+    yield None, DATA_CONSUMED_TO_HERE, None, key_end
+
+
+class StubProgressDatabase(object):
+  """A stub implementation of ProgressDatabase which does nothing."""
+
+  def HasUnfinishedWork(self):
+    """Whether the stub database has progress information (it doesn't)."""
+    return False
+
+  def StoreKeys(self, unused_key_start, unused_key_end):
+    """Pretend to store a key in the stub database."""
+    return 'fake-key'
+
+  def UpdateState(self, unused_key, unused_new_state):
+    """Pretend to update the state of a progress item."""
+    pass
+
+  def ThreadComplete(self):
+    """Finalize operations on the stub database (i.e. do nothing)."""
+    pass
+
+
+class ProgressTrackerThread(_ThreadBase):
+  """A thread which records progress information for the upload process.
+
+  The progress information is stored into the provided progress database.
+  This class is not responsible for replaying a prior run's progress
+  information out of the database. Separate mechanisms must be used to
+  resume a prior upload attempt.
+  """
+
+  NAME = 'progress tracking thread'
+
+  def __init__(self, progress_queue, progress_db):
+    """Initialize the ProgressTrackerThread instance.
+
+    Args:
+      progress_queue: A Queue used for tracking progress information.
+      progress_db: The database for tracking progress information; should
+        be an instance of ProgressDatabase.
+    """
+    _ThreadBase.__init__(self)
+
+    self.progress_queue = progress_queue
+    self.db = progress_db
+    self.entities_sent = 0
+
+  def PerformWork(self):
+    """Performs the work of a ProgressTrackerThread."""
+    while not self.exit_flag:
+      try:
+        item = self.progress_queue.get(block=True, timeout=1.0)
+      except Queue.Empty:
+        continue
+      if item == _THREAD_SHOULD_EXIT:
+        break
+
+      if item.state == STATE_READ and item.progress_key is None:
+        item.progress_key = self.db.StoreKeys(item.key_start, item.key_end)
+      else:
+        assert item.progress_key is not None
+
+        self.db.UpdateState(item.progress_key, item.state)
+        if item.state == STATE_SENT:
+          self.entities_sent += item.count
+
+      item.progress_event.set()
+
+      self.progress_queue.task_done()
+
+    self.db.ThreadComplete()
+
+
+
+def Validate(value, typ):
+  """Checks that value is non-empty and of the right type.
+
+  Args:
+    value: any value
+    typ: a type or tuple of types
+
+  Raises:
+    ValueError if value is None or empty.
+    TypeError if it's not the given type.
+
+  """
+  if not value:
+    raise ValueError('Value should not be empty; received %s.' % value)
+  elif not isinstance(value, typ):
+    raise TypeError('Expected a %s, but received %s (a %s).' %
+                    (typ, value, value.__class__))
+
+
+class Loader(object):
+  """A base class for creating datastore entities from input data.
+
+  To add a handler for bulk loading a new entity kind into your datastore,
+  write a subclass of this class that calls Loader.__init__ from your
+  class's __init__.
+
+  If you need to run extra code to convert entities from the input
+  data, create new properties, or otherwise modify the entities before
+  they're inserted, override HandleEntity.
+
+  See the CreateEntity method for the creation of entities from the
+  (parsed) input data.
+  """
+
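+  # A minimal, hypothetical subclass as it might appear in the file passed
+  # via --config_file (the 'Album' kind and its columns are invented; the
+  # matching db.Model class must also be defined or imported there):
+  #
+  #   class AlbumLoader(Loader):
+  #     def __init__(self):
+  #       Loader.__init__(self, 'Album',
+  #                       [('title', str),
+  #                        ('artist', str),
+  #                        ('release_year', int),
+  #                       ])
+  #
+  # LoadConfig() below instantiates and registers every Loader subclass
+  # found in the config file.
+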
+  __loaders = {}
+  __kind = None
+  __properties = None
+
+  def __init__(self, kind, properties):
+    """Constructor.
+
+    Populates this Loader's kind and properties map. Also registers it with
+    the bulk loader, so that all you need to do is instantiate your Loader,
+    and the bulkload handler will automatically use it.
+
+    Args:
+      kind: a string containing the entity kind that this loader handles
+
+      properties: list of (name, converter) tuples.
+
+        This is used to automatically convert the CSV columns into
+        properties.  The converter should be a function that takes one
+        argument, a string value from the CSV file, and returns a
+        correctly typed property value that should be inserted. The
+        tuples in this list should match the columns in your CSV file,
+        in order.
+
+        For example:
+          [('name', str),
+           ('id_number', int),
+           ('email', datastore_types.Email),
+           ('user', users.User),
+           ('birthdate', lambda x: datetime.datetime.fromtimestamp(float(x))),
+           ('description', datastore_types.Text),
+           ]
+    """
+    Validate(kind, basestring)
+    self.__kind = kind
+
+    db.class_for_kind(kind)
+
+    Validate(properties, list)
+    for name, fn in properties:
+      Validate(name, basestring)
+      assert callable(fn), (
+        'Conversion function %s for property %s is not callable.' % (fn, name))
+
+    self.__properties = properties
+
+  @staticmethod
+  def RegisterLoader(loader):
+    """Register the given Loader instance, keyed by its entity kind."""
+    Loader.__loaders[loader.__kind] = loader
+
+  def kind(self):
+    """ Return the entity kind that this Loader handes.
+    """
+    return self.__kind
+
+  def CreateEntity(self, values, key_name=None):
+    """Creates a entity from a list of property values.
+
+    Args:
+      values: list/tuple of str
+      key_name: if provided, the name for the (single) resulting entity
+
+    Returns:
+      list of db.Model
+
+      The returned entities are populated with the property values from the
+      argument, converted to native types using the properties map given in
+      the constructor, and passed through HandleEntity. They're ready to be
+      inserted.
+
+    Raises:
+      AssertionError if the number of values doesn't match the number
+        of properties in the properties map.
+      ValueError if any element of values is None or empty.
+      TypeError if values is not a list or tuple.
+    """
+    Validate(values, (list, tuple))
+    assert len(values) == len(self.__properties), (
+      'Expected %d CSV columns, found %d.' %
+      (len(self.__properties), len(values)))
+
+    model_class = db.class_for_kind(self.__kind)
+
+    properties = {'key_name': key_name}
+    for (name, converter), val in zip(self.__properties, values):
+      if converter is bool and val.lower() in ('0', 'false', 'no'):
+        val = False
+      properties[name] = converter(val)
+
+    entity = model_class(**properties)
+    entities = self.HandleEntity(entity)
+
+    if entities:
+      if not isinstance(entities, (list, tuple)):
+        entities = [entities]
+
+      for entity in entities:
+        if not isinstance(entity, db.Model):
+          raise TypeError('Expected a db.Model, received %s (a %s).' %
+                          (entity, entity.__class__))
+
+    return entities
+
+  def GenerateKey(self, i, values):
+    """Generates a key_name to be used in creating the underlying object.
+
+    The default implementation returns None.
+
+    This method can be overridden to control the key generation for
+    uploaded entities. The value returned should be None (to use a
+    server generated numeric key), or a string which neither starts
+    with a digit nor has the form __*__. (See
+    http://code.google.com/appengine/docs/python/datastore/keysandentitygroups.html)
+
+    If you generate your own string keys, keep in mind:
+
+    1. The key name for each entity must be unique.
+    2. If an entity of the same kind and key already exists in the
+       datastore, it will be overwritten.
+
+    Args:
+      i: Number corresponding to this object (assuming it's run in a loop,
+        this is your current count).
+      values: list/tuple of str.
+
+    Returns:
+      A string to be used as the key_name for an entity.
+    """
+    return None
+
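+  # Hedged sketch of an override (the key format is hypothetical):
+  #
+  #   def GenerateKey(self, i, values):
+  #     return 'album-%s' % values[0]
+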
+  def HandleEntity(self, entity):
+    """Subclasses can override this to add custom entity conversion code.
+
+    This is called for each entity, after its properties are populated from
+    CSV but before it is stored. Subclasses can override this to add custom
+    entity handling code.
+
+    The entity to be inserted should be returned. If multiple entities should
+    be inserted, return a list of entities. If no entities should be inserted,
+    return None or [].
+
+    Args:
+      entity: db.Model
+
+    Returns:
+      db.Model or list of db.Model
+    """
+    return entity
+
+
+  @staticmethod
+  def RegisteredLoaders():
+    """Returns a list of the Loader instances that have been created.
+    """
+    return dict(Loader.__loaders)
+
+
+class QueueJoinThread(threading.Thread):
+  """A thread that joins a queue and exits.
+
+  Queue joins do not have a timeout.  To simulate a queue join with
+  timeout, run this thread and join it with a timeout.
+  """
+
+  def __init__(self, queue):
+    """Initialize a QueueJoinThread.
+
+    Args:
+      queue: The queue for this thread to join.
+    """
+    threading.Thread.__init__(self)
+    assert isinstance(queue, (Queue.Queue, ReQueue))
+    self.queue = queue
+
+  def run(self):
+    """Perform the queue join in this thread."""
+    self.queue.join()
+
+
+def InterruptibleQueueJoin(queue,
+                           thread_local,
+                           thread_gate,
+                           queue_join_thread_factory=QueueJoinThread):
+  """Repeatedly joins the given ReQueue or Queue.Queue with short timeout.
+
+  Between each timeout on the join, worker threads are checked.
+
+  Args:
+    queue: A Queue.Queue or ReQueue instance.
+    thread_local: A threading.local instance which indicates interrupts.
+    thread_gate: A ThreadGate instance.
+    queue_join_thread_factory: Used for dependency injection.
+
+  Returns:
+    True unless the queue join is interrupted by SIGINT or worker death.
+  """
+  thread = queue_join_thread_factory(queue)
+  thread.start()
+  while True:
+    thread.join(timeout=.5)
+    if not thread.isAlive():
+      return True
+    if thread_local.shut_down:
+      logging.debug('Queue join interrupted')
+      return False
+    for worker_thread in thread_gate.Threads():
+      if not worker_thread.isAlive():
+        return False
+
+
+def ShutdownThreads(data_source_thread, work_queue, thread_gate):
+  """Shuts down the worker and data source threads.
+
+  Args:
+    data_source_thread: A running DataSourceThread instance.
+    work_queue: The work queue.
+    thread_gate: A ThreadGate instance with workers registered.
+  """
+  logging.info('An error occurred. Shutting down...')
+
+  data_source_thread.exit_flag = True
+
+  for thread in thread_gate.Threads():
+    thread.exit_flag = True
+
+  for unused_thread in thread_gate.Threads():
+    thread_gate.EnableThread()
+
+  data_source_thread.join(timeout=3.0)
+  if data_source_thread.isAlive():
+    logging.warn('%s hung while trying to exit',
+                 data_source_thread.GetFriendlyName())
+
+  while not work_queue.empty():
+    try:
+      unused_item = work_queue.get_nowait()
+      work_queue.task_done()
+    except Queue.Empty:
+      pass
+
+
+def PerformBulkUpload(app_id,
+                      post_url,
+                      kind,
+                      workitem_generator_factory,
+                      num_threads,
+                      throttle,
+                      progress_db,
+                      max_queue_size=DEFAULT_QUEUE_SIZE,
+                      request_manager_factory=RequestManager,
+                      bulkloaderthread_factory=BulkLoaderThread,
+                      progresstrackerthread_factory=ProgressTrackerThread,
+                      datasourcethread_factory=DataSourceThread,
+                      work_queue_factory=ReQueue,
+                      progress_queue_factory=Queue.Queue):
+  """Uploads data into an application using a series of HTTP POSTs.
+
+  This function will spin up a number of threads to read entities from
+  the data source, pass those to a number of worker ("uploader") threads
+  for sending to the application, and track all of the progress in a
+  small database in case an error or pause/termination requires a
+  restart/resumption of the upload process.
+
+  Args:
+    app_id: String containing application id.
+    post_url: URL to post the Entity data to.
+    kind: Kind of the Entity records being posted.
+    workitem_generator_factory: A factory that creates a WorkItem generator.
+    num_threads: How many uploader threads should be created.
+    throttle: A Throttle instance.
+    progress_db: The database to use for replaying/recording progress.
+    max_queue_size: Maximum size of the queues before they should block.
+    request_manager_factory: Used for dependency injection.
+    bulkloaderthread_factory: Used for dependency injection.
+    progresstrackerthread_factory: Used for dependency injection.
+    datasourcethread_factory: Used for dependency injection.
+    work_queue_factory: Used for dependency injection.
+    progress_queue_factory: Used for dependency injection.
+
+  Raises:
+    AuthenticationError: If authentication is required and fails.
+  """
+  thread_gate = ThreadGate(True)
+
+  (unused_scheme,
+   host_port, url_path,
+   unused_query, unused_fragment) = urlparse.urlsplit(post_url)
+
+  work_queue = work_queue_factory(max_queue_size)
+  progress_queue = progress_queue_factory(max_queue_size)
+  request_manager = request_manager_factory(app_id,
+                                            host_port,
+                                            url_path,
+                                            kind,
+                                            throttle)
+
+  throttle.Register(threading.currentThread())
+  try:
+    request_manager.Authenticate()
+  except Exception, e:
+    logging.exception(e)
+    raise AuthenticationError('Authentication failed')
+  if (request_manager.credentials is not None and
+      not request_manager.authenticated):
+    raise AuthenticationError('Authentication failed')
+
+  for unused_idx in range(num_threads):
+    thread = bulkloaderthread_factory(work_queue,
+                                      throttle,
+                                      thread_gate,
+                                      request_manager)
+    throttle.Register(thread)
+    thread_gate.Register(thread)
+
+  progress_thread = progresstrackerthread_factory(progress_queue, progress_db)
+
+  if progress_db.HasUnfinishedWork():
+    logging.debug('Restarting upload using progress database')
+    progress_generator_factory = progress_db.GetProgressStatusGenerator
+  else:
+    progress_generator_factory = None
+
+  data_source_thread = datasourcethread_factory(work_queue,
+                                                progress_queue,
+                                                workitem_generator_factory,
+                                                progress_generator_factory)
+
+  thread_local = threading.local()
+  thread_local.shut_down = False
+
+  def Interrupt(unused_signum, unused_frame):
+    """Shutdown gracefully in response to a signal."""
+    thread_local.shut_down = True
+
+  signal.signal(signal.SIGINT, Interrupt)
+
+  progress_thread.start()
+  data_source_thread.start()
+  for thread in thread_gate.Threads():
+    thread.start()
+
+
+  while not thread_local.shut_down:
+    data_source_thread.join(timeout=0.25)
+
+    if data_source_thread.isAlive():
+      for thread in list(thread_gate.Threads()) + [progress_thread]:
+        if not thread.isAlive():
+          logging.info('Unexpected thread death: %s', thread.getName())
+          thread_local.shut_down = True
+          break
+    else:
+      break
+
+  if thread_local.shut_down:
+    ShutdownThreads(data_source_thread, work_queue, thread_gate)
+
+  def _Join(ob, msg):
+    logging.debug('Waiting for %s...', msg)
+    if isinstance(ob, threading.Thread):
+      ob.join(timeout=3.0)
+      if ob.isAlive():
+        logging.debug('Joining %s failed', ob.GetFriendlyName())
+      else:
+        logging.debug('... done.')
+    elif isinstance(ob, (Queue.Queue, ReQueue)):
+      if not InterruptibleQueueJoin(ob, thread_local, thread_gate):
+        ShutdownThreads(data_source_thread, work_queue, thread_gate)
+    else:
+      ob.join()
+      logging.debug('... done.')
+
+  _Join(work_queue, 'work_queue to flush')
+
+  for unused_thread in thread_gate.Threads():
+    work_queue.put(_THREAD_SHOULD_EXIT)
+
+  for unused_thread in thread_gate.Threads():
+    thread_gate.EnableThread()
+
+  for thread in thread_gate.Threads():
+    _Join(thread, 'thread [%s] to terminate' % thread.getName())
+
+    thread.CheckError()
+
+  if progress_thread.isAlive():
+    _Join(progress_queue, 'progress_queue to finish')
+  else:
+    logging.warn('Progress thread exited prematurely')
+
+  progress_queue.put(_THREAD_SHOULD_EXIT)
+  _Join(progress_thread, 'progress_thread to terminate')
+  progress_thread.CheckError()
+
+  data_source_thread.CheckError()
+
+  total_up, duration = throttle.TotalTransferred(BANDWIDTH_UP)
+  s_total_up, unused_duration = throttle.TotalTransferred(HTTPS_BANDWIDTH_UP)
+  total_up += s_total_up
+  logging.info('%d entities read, %d previously transferred',
+               data_source_thread.read_count,
+               data_source_thread.sent_count)
+  logging.info('%d entities (%d bytes) transferred in %.1f seconds',
+               progress_thread.entities_sent, total_up, duration)
+  if (data_source_thread.read_all and
+      progress_thread.entities_sent + data_source_thread.sent_count >=
+      data_source_thread.read_count):
+    logging.info('All entities successfully uploaded')
+  else:
+    logging.info('Some entities not successfully uploaded')
+
+
+def PrintUsageExit(code):
+  """Prints usage information and exits with a status code.
+
+  Args:
+    code: Status code to pass to sys.exit() after displaying usage information.
+  """
+  print __doc__ % {'arg0': sys.argv[0]}
+  sys.stdout.flush()
+  sys.stderr.flush()
+  sys.exit(code)
+
+
+def ParseArguments(argv):
+  """Parses command-line arguments.
+
+  Prints out a help message if -h or --help is supplied.
+
+  Args:
+    argv: List of command-line arguments.
+
+  Returns:
+    Tuple (url, filename, cookie, batch_size, kind) containing the values from
+    each corresponding command-line flag.
+  """
+  opts, unused_args = getopt.getopt(
+      argv[1:],
+      'h',
+      ['debug',
+       'help',
+       'url=',
+       'filename=',
+       'batch_size=',
+       'kind=',
+       'num_threads=',
+       'bandwidth_limit=',
+       'rps_limit=',
+       'http_limit=',
+       'db_filename=',
+       'app_id=',
+       'config_file=',
+       'auth_domain=',
+      ])
+
+  url = None
+  filename = None
+  batch_size = DEFAULT_BATCH_SIZE
+  kind = None
+  num_threads = DEFAULT_THREAD_COUNT
+  bandwidth_limit = DEFAULT_BANDWIDTH_LIMIT
+  rps_limit = DEFAULT_RPS_LIMIT
+  http_limit = DEFAULT_REQUEST_LIMIT
+  db_filename = None
+  app_id = None
+  config_file = None
+  auth_domain = 'gmail.com'
+
+  for option, value in opts:
+    if option == '--debug':
+      logging.getLogger().setLevel(logging.DEBUG)
+    elif option in ('-h', '--help'):
+      PrintUsageExit(0)
+    elif option == '--url':
+      url = value
+    elif option == '--filename':
+      filename = value
+    elif option == '--batch_size':
+      batch_size = int(value)
+    elif option == '--kind':
+      kind = value
+    elif option == '--num_threads':
+      num_threads = int(value)
+    elif option == '--bandwidth_limit':
+      bandwidth_limit = int(value)
+    elif option == '--rps_limit':
+      rps_limit = int(value)
+    elif option == '--http_limit':
+      http_limit = int(value)
+    elif option == '--db_filename':
+      db_filename = value
+    elif option == '--app_id':
+      app_id = value
+    elif option == '--config_file':
+      config_file = value
+    elif option == '--auth_domain':
+      auth_domain = value
+
+  return ProcessArguments(app_id=app_id,
+                          url=url,
+                          filename=filename,
+                          batch_size=batch_size,
+                          kind=kind,
+                          num_threads=num_threads,
+                          bandwidth_limit=bandwidth_limit,
+                          rps_limit=rps_limit,
+                          http_limit=http_limit,
+                          db_filename=db_filename,
+                          config_file=config_file,
+                          auth_domain=auth_domain,
+                          die_fn=lambda: PrintUsageExit(1))
+
+
+def ThrottleLayout(bandwidth_limit, http_limit, rps_limit):
+  return {
+      BANDWIDTH_UP: bandwidth_limit,
+      BANDWIDTH_DOWN: bandwidth_limit,
+      REQUESTS: http_limit,
+      HTTPS_BANDWIDTH_UP: bandwidth_limit / 5,
+      HTTPS_BANDWIDTH_DOWN: bandwidth_limit / 5,
+      HTTPS_REQUESTS: http_limit / 5,
+      RECORDS: rps_limit,
+  }
+
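+# For example (hypothetical limits), ThrottleLayout(250000, 8, 20) yields:
+#
+#   {BANDWIDTH_UP: 250000, BANDWIDTH_DOWN: 250000, REQUESTS: 8,
+#    HTTPS_BANDWIDTH_UP: 50000, HTTPS_BANDWIDTH_DOWN: 50000,
+#    HTTPS_REQUESTS: 1, RECORDS: 20}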
+
+def LoadConfig(config_file):
+  """Loads a config file and registers any Loader classes present."""
+  if config_file:
+    global_dict = dict(globals())
+    execfile(config_file, global_dict)
+    for cls in Loader.__subclasses__():
+      Loader.RegisterLoader(cls())
+
+
+def _MissingArgument(arg_name, die_fn):
+  """Print error message about missing argument and die."""
+  print >>sys.stderr, '%s argument required' % arg_name
+  die_fn()
+
+
+def ProcessArguments(app_id=None,
+                     url=None,
+                     filename=None,
+                     batch_size=DEFAULT_BATCH_SIZE,
+                     kind=None,
+                     num_threads=DEFAULT_THREAD_COUNT,
+                     bandwidth_limit=DEFAULT_BANDWIDTH_LIMIT,
+                     rps_limit=DEFAULT_RPS_LIMIT,
+                     http_limit=DEFAULT_REQUEST_LIMIT,
+                     db_filename=None,
+                     config_file=None,
+                     auth_domain='gmail.com',
+                     die_fn=lambda: sys.exit(1)):
+  """Processes non command-line input arguments."""
+  if db_filename is None:
+    db_filename = time.strftime('bulkloader-progress-%Y%m%d.%H%M%S.sql3')
+
+  if batch_size <= 0:
+    print >>sys.stderr, 'batch_size must be 1 or larger'
+    die_fn()
+
+  if url is None:
+    _MissingArgument('url', die_fn)
+
+  if filename is None:
+    _MissingArgument('filename', die_fn)
+
+  if kind is None:
+    _MissingArgument('kind', die_fn)
+
+  if config_file is None:
+    _MissingArgument('config_file', die_fn)
+
+  if app_id is None:
+    (unused_scheme, host_port, unused_url_path,
+     unused_query, unused_fragment) = urlparse.urlsplit(url)
+    suffix_idx = host_port.find('.appspot.com')
+    if suffix_idx > -1:
+      app_id = host_port[:suffix_idx]
+    elif host_port.split(':')[0].endswith('google.com'):
+      app_id = host_port.split('.')[0]
+    else:
+      print >>sys.stderr, 'app_id required for non appspot.com domains'
+      die_fn()
+
+  return (app_id, url, filename, batch_size, kind, num_threads,
+          bandwidth_limit, rps_limit, http_limit, db_filename, config_file,
+          auth_domain)
+
+
+def _PerformBulkload(app_id=None,
+                     url=None,
+                     filename=None,
+                     batch_size=DEFAULT_BATCH_SIZE,
+                     kind=None,
+                     num_threads=DEFAULT_THREAD_COUNT,
+                     bandwidth_limit=DEFAULT_BANDWIDTH_LIMIT,
+                     rps_limit=DEFAULT_RPS_LIMIT,
+                     http_limit=DEFAULT_REQUEST_LIMIT,
+                     db_filename=None,
+                     config_file=None,
+                     auth_domain='gmail.com'):
+  """Runs the bulkloader, given the options as keyword arguments.
+
+  Args:
+    app_id: The application id.
+    url: The url of the remote_api endpoint.
+    filename: The name of the file containing the CSV data.
+    batch_size: The number of records to send per request.
+    kind: The kind of entity to transfer.
+    num_threads: The number of threads to use to transfer data.
+    bandwidth_limit: Maximum bytes/second to transfer.
+    rps_limit: Maximum records/second to transfer.
+    http_limit: Maximum requests/second for transfers.
+    db_filename: The name of the SQLite3 progress database file.
+    config_file: The name of the configuration file.
+    auth_domain: The auth domain to use for logins and UserProperty.
+
+  Returns:
+    An exit code.
+  """
+  os.environ['AUTH_DOMAIN'] = auth_domain
+  LoadConfig(config_file)
+
+  throttle_layout = ThrottleLayout(bandwidth_limit, http_limit, rps_limit)
+
+  throttle = Throttle(layout=throttle_layout)
+
+
+  workitem_generator_factory = GetCSVGeneratorFactory(filename, batch_size)
+
+  if db_filename == 'skip':
+    progress_db = StubProgressDatabase()
+  else:
+    progress_db = ProgressDatabase(db_filename)
+
+
+  max_queue_size = max(DEFAULT_QUEUE_SIZE, 2 * num_threads + 5)
+
+  PerformBulkUpload(app_id,
+                    url,
+                    kind,
+                    workitem_generator_factory,
+                    num_threads,
+                    throttle,
+                    progress_db,
+                    max_queue_size=max_queue_size)
+
+  return 0
+
+
+def Run(app_id=None,
+        url=None,
+        filename=None,
+        batch_size=DEFAULT_BATCH_SIZE,
+        kind=None,
+        num_threads=DEFAULT_THREAD_COUNT,
+        bandwidth_limit=DEFAULT_BANDWIDTH_LIMIT,
+        rps_limit=DEFAULT_RPS_LIMIT,
+        http_limit=DEFAULT_REQUEST_LIMIT,
+        db_filename=None,
+        auth_domain='gmail.com',
+        config_file=None):
+  """Sets up and runs the bulkloader, given the options as keyword arguments.
+
+  Args:
+    app_id: The application id.
+    url: The url of the remote_api endpoint.
+    filename: The name of the file containing the CSV data.
+    batch_size: The number of records to send per request.
+    kind: The kind of entity to transfer.
+    num_threads: The number of threads to use to transfer data.
+    bandwidth_limit: Maximum bytes/second to transfer.
+    rps_limit: Maximum records/second to transfer.
+    http_limit: Maximum requests/second for transfers.
+    db_filename: The name of the SQLite3 progress database file.
+    config_file: The name of the configuration file.
+    auth_domain: The auth domain to use for logins and UserProperty.
+
+  Returns:
+    An exit code.
+  """
+  logging.basicConfig(
+      format='%(levelname)-8s %(asctime)s %(filename)s] %(message)s')
+  args = ProcessArguments(app_id=app_id,
+                          url=url,
+                          filename=filename,
+                          batch_size=batch_size,
+                          kind=kind,
+                          num_threads=num_threads,
+                          bandwidth_limit=bandwidth_limit,
+                          rps_limit=rps_limit,
+                          http_limit=http_limit,
+                          db_filename=db_filename,
+                          config_file=config_file)
+
+  (app_id, url, filename, batch_size, kind, num_threads, bandwidth_limit,
+   rps_limit, http_limit, db_filename, config_file, auth_domain) = args
+
+  return _PerformBulkload(app_id=app_id,
+                          url=url,
+                          filename=filename,
+                          batch_size=batch_size,
+                          kind=kind,
+                          num_threads=num_threads,
+                          bandwidth_limit=bandwidth_limit,
+                          rps_limit=rps_limit,
+                          http_limit=http_limit,
+                          db_filename=db_filename,
+                          config_file=config_file,
+                          auth_domain=auth_domain)
+
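+# Example of invoking the uploader programmatically (all values hypothetical):
+#
+#   Run(app_id='myapp',
+#       url='http://myapp.appspot.com/remote_api',
+#       filename='albums.csv',
+#       kind='Album',
+#       config_file='album_loader.py')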
+
+def main(argv):
+  """Runs the importer from the command line."""
+  logging.basicConfig(
+      level=logging.INFO,
+      format='%(levelname)-8s %(asctime)s %(filename)s] %(message)s')
+
+  args = ParseArguments(argv)
+  if None in args:
+    print >>sys.stderr, 'Invalid arguments'
+    PrintUsageExit(1)
+
+  (app_id, url, filename, batch_size, kind, num_threads,
+   bandwidth_limit, rps_limit, http_limit, db_filename, config_file,
+   auth_domain) = args
+
+  return _PerformBulkload(app_id=app_id,
+                          url=url,
+                          filename=filename,
+                          batch_size=batch_size,
+                          kind=kind,
+                          num_threads=num_threads,
+                          bandwidth_limit=bandwidth_limit,
+                          rps_limit=rps_limit,
+                          http_limit=http_limit,
+                          db_filename=db_filename,
+                          config_file=config_file,
+                          auth_domain=auth_domain)
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
--- a/thirdparty/google_appengine/google/appengine/tools/dev_appserver.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/dev_appserver.py	Thu Feb 12 12:30:36 2009 +0000
@@ -83,6 +83,8 @@
 from google.appengine.api.capabilities import capability_stub
 from google.appengine.api.memcache import memcache_stub
 
+from google.appengine import dist
+
 from google.appengine.tools import dev_appserver_index
 from google.appengine.tools import dev_appserver_login
 
@@ -113,10 +115,12 @@
                        ('.wbmp', 'image/vnd.wap.wbmp')):
   mimetypes.add_type(mime_type, ext)
 
-MAX_RUNTIME_RESPONSE_SIZE = 1 << 20
+MAX_RUNTIME_RESPONSE_SIZE = 10 << 20
 
 MAX_REQUEST_SIZE = 10 * 1024 * 1024
 
+API_VERSION = '1'
+
 
 class Error(Exception):
   """Base-class for exceptions in this module."""
@@ -193,9 +197,31 @@
       outfile: File-like object where output data should be written.
       base_env_dict: Dictionary of CGI environment parameters if available.
         Defaults to None.
+
+    Returns:
+      None if request handling is complete.
+      Tuple (path, headers, input_file) for an internal redirect:
+        path: Path of URL to redirect to.
+        headers: Headers to send to other dispatcher.
+        input_file: New input to send to new dispatcher.
     """
     raise NotImplementedError
 
+  def EndRedirect(self, dispatched_output, original_output):
+    """Process the end of an internal redirect.
+
+    This method is called after all subsequent dispatch requests have finished.
+    By default the output from the dispatched process is copied to the original.
+
+    This will not be called on dispatchers that do not return an internal
+    redirect.
+
+    Args:
+      dispatched_output: StringIO buffer containing the results from the
+       dispatched
+    """
+    original_output.write(dispatched_output.read())
+
 
 class URLMatcher(object):
   """Matches an arbitrary URL using a list of URL patterns from an application.
@@ -346,12 +372,25 @@
                       'authorized to view this page.'
                       % (httplib.FORBIDDEN, email))
       else:
-        dispatcher.Dispatch(relative_url,
-                            matched_path,
-                            headers,
-                            infile,
-                            outfile,
-                            base_env_dict=base_env_dict)
+        forward = dispatcher.Dispatch(relative_url,
+                                      matched_path,
+                                      headers,
+                                      infile,
+                                      outfile,
+                                      base_env_dict=base_env_dict)
+
+        if forward:
+          new_path, new_headers, new_input = forward
+          logging.info('Internal redirection to %s' % new_path)
+          new_outfile = cStringIO.StringIO()
+          self.Dispatch(new_path,
+                        None,
+                        new_headers,
+                        new_input,
+                        new_outfile,
+                        dict(base_env_dict))
+          new_outfile.seek(0)
+          dispatcher.EndRedirect(new_outfile, outfile)
 
       return
 
@@ -514,11 +553,6 @@
   return env
 
 
-def FakeTemporaryFile(*args, **kwargs):
-  """Fake for tempfile.TemporaryFile that just uses StringIO."""
-  return cStringIO.StringIO()
-
-
 def NotImplementedFake(*args, **kwargs):
   """Fake for methods/functions that are not implemented in the production
   environment.
@@ -577,6 +611,27 @@
   return ('Linux', '', '', '', '')
 
 
+def FakeUnlink(path):
+  """Fake version of os.unlink."""
+  if os.path.isdir(path):
+    raise OSError(2, "Is a directory", path)
+  else:
+    raise OSError(1, "Operation not permitted", path)
+
+
+def FakeReadlink(path):
+  """Fake version of os.readlink."""
+  raise OSError(22, "Invalid argument", path)
+
+
+def FakeAccess(path, mode):
+  """Fake version of os.access where only reads are supported."""
+  if not os.path.exists(path) or mode != os.R_OK:
+    return False
+  else:
+    return True
+
+
 def FakeSetLocale(category, value=None, original_setlocale=locale.setlocale):
   """Fake version of locale.setlocale that only supports the default."""
   if value not in (None, '', 'C', 'POSIX'):
@@ -715,30 +770,74 @@
 
   ])
 
+  _original_file = file
+
+  _root_path = None
   _application_paths = None
-  _original_file = file
+  _skip_files = None
+  _static_file_config_matcher = None
+
+  _availability_cache = {}
 
   @staticmethod
-  def SetAllowedPaths(application_paths):
-    """Sets the root path of the application that is currently running.
+  def SetAllowedPaths(root_path, application_paths):
+    """Configures which paths are allowed to be accessed.
 
     Must be called at least once before any file objects are created in the
     hardened environment.
 
     Args:
-      root_path: Path to the root of the application.
+      root_path: Absolute path to the root of the application.
+      application_paths: List of additional paths that the application may
+                         access, this must include the App Engine runtime but
+                         not the Python library directories.
     """
     FakeFile._application_paths = (set(os.path.realpath(path)
                                        for path in application_paths) |
                                    set(os.path.abspath(path)
                                        for path in application_paths))
+    FakeFile._application_paths.add(root_path)
+
+    FakeFile._root_path = os.path.join(root_path, '')
+
+    FakeFile._availability_cache = {}
+
+  @staticmethod
+  def SetSkippedFiles(skip_files):
+    """Sets which files in the application directory are to be ignored.
+
+    Must be called at least once before any file objects are created in the
+    hardened environment.
+
+    Must be called whenever the configuration was updated.
+
+    Args:
+      skip_files: Object with .match() method (e.g. compiled regexp).
+    """
+    FakeFile._skip_files = skip_files
+    FakeFile._availability_cache = {}
+
+  @staticmethod
+  def SetStaticFileConfigMatcher(static_file_config_matcher):
+    """Sets StaticFileConfigMatcher instance for checking if a file is static.
+
+    Must be called at least once before any file objects are created in the
+    hardened environment.
+
+    Must be called whenever the configuration was updated.
+
+    Args:
+      static_file_config_matcher: StaticFileConfigMatcher instance.
+    """
+    FakeFile._static_file_config_matcher = static_file_config_matcher
+    FakeFile._availability_cache = {}
 
   @staticmethod
   def IsFileAccessible(filename, normcase=os.path.normcase):
     """Determines if a file's path is accessible.
 
-    SetAllowedPaths() must be called before this method or else all file
-    accesses will raise an error.
+    SetAllowedPaths(), SetSkippedFiles() and SetStaticFileConfigMatcher() must
+    be called before this method or else all file accesses will raise an error.
 
     Args:
       filename: Path of the file to check (relative or absolute). May be a
@@ -754,6 +853,40 @@
     if os.path.isdir(logical_filename):
       logical_filename = os.path.join(logical_filename, 'foo')
 
+    result = FakeFile._availability_cache.get(logical_filename)
+    if result is None:
+      result = FakeFile._IsFileAccessibleNoCache(logical_filename,
+                                                 normcase=normcase)
+      FakeFile._availability_cache[logical_filename] = result
+    return result
+
+  @staticmethod
+  def _IsFileAccessibleNoCache(logical_filename, normcase=os.path.normcase):
+    """Determines if a file's path is accessible.
+
+    This is an internal part of the IsFileAccessible implementation.
+
+    Args:
+      logical_filename: Absolute path of the file to check.
+      normcase: Used for dependency injection.
+
+    Returns:
+      True if the file is accessible, False otherwise.
+    """
+    if IsPathInSubdirectories(logical_filename, [FakeFile._root_path],
+                              normcase=normcase):
+      relative_filename = logical_filename[len(FakeFile._root_path):]
+
+      if FakeFile._skip_files.match(relative_filename):
+        logging.warning('Blocking access to skipped file "%s"',
+                        logical_filename)
+        return False
+
+      if FakeFile._static_file_config_matcher.IsStaticFile(relative_filename):
+        logging.warning('Blocking access to static file "%s"',
+                        logical_filename)
+        return False
+
     if logical_filename in FakeFile.ALLOWED_FILES:
       return True
 
@@ -887,8 +1020,6 @@
       indent = self._indent_level * '  '
       print >>sys.stderr, indent + (message % args)
 
-  EMPTY_MODULE_FILE = '<empty module>'
-
   _WHITE_LIST_C_MODULES = [
     'array',
     'binascii',
@@ -959,6 +1090,7 @@
 
 
     'os': [
+      'access',
       'altsep',
       'curdir',
       'defpath',
@@ -1007,6 +1139,8 @@
       'path',
       'pathsep',
       'R_OK',
+      'readlink',
+      'remove',
       'SEEK_CUR',
       'SEEK_END',
       'SEEK_SET',
@@ -1016,6 +1150,7 @@
       'stat_result',
       'strerror',
       'TMP_MAX',
+      'unlink',
       'urandom',
       'walk',
       'WCOREDUMP',
@@ -1032,46 +1167,23 @@
     ],
   }
 
-  _EMPTY_MODULES = [
-    'imp',
-    'ftplib',
-    'select',
-    'socket',
-    'tempfile',
-  ]
-
   _MODULE_OVERRIDES = {
     'locale': {
       'setlocale': FakeSetLocale,
     },
 
     'os': {
+      'access': FakeAccess,
       'listdir': RestrictedPathFunction(os.listdir),
 
       'lstat': RestrictedPathFunction(os.stat),
+      'readlink': FakeReadlink,
+      'remove': FakeUnlink,
       'stat': RestrictedPathFunction(os.stat),
       'uname': FakeUname,
+      'unlink': FakeUnlink,
       'urandom': FakeURandom,
     },
-
-    'socket': {
-      'AF_INET': None,
-      'SOCK_STREAM': None,
-      'SOCK_DGRAM': None,
-      '_GLOBAL_DEFAULT_TIMEOUT': getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT',
-                                         None),
-    },
-
-    'tempfile': {
-      'TemporaryFile': FakeTemporaryFile,
-      'gettempdir': NotImplementedFake,
-      'gettempprefix': NotImplementedFake,
-      'mkdtemp': NotImplementedFake,
-      'mkstemp': NotImplementedFake,
-      'mktemp': NotImplementedFake,
-      'NamedTemporaryFile': NotImplementedFake,
-      'tempdir': NotImplementedFake,
-    },
   }
 
   _ENABLED_FILE_TYPES = (
@@ -1107,8 +1219,7 @@
   @Trace
   def find_module(self, fullname, path=None):
     """See PEP 302."""
-    if (fullname in ('cPickle', 'thread') or
-        fullname in HardenedModulesHook._EMPTY_MODULES):
+    if fullname in ('cPickle', 'thread'):
       return self
 
     search_path = path
@@ -1116,7 +1227,8 @@
     try:
       for index, current_module in enumerate(all_modules):
         current_module_fullname = '.'.join(all_modules[:index + 1])
-        if current_module_fullname == fullname:
+        if (current_module_fullname == fullname and not
+            self.StubModuleExists(fullname)):
           self.FindModuleRestricted(current_module,
                                     current_module_fullname,
                                     search_path)
@@ -1135,6 +1247,21 @@
 
     return self
 
+  def StubModuleExists(self, name):
+    """Check if the named module has a stub replacement."""
+    if name in sys.builtin_module_names:
+      name = 'py_%s' % name
+    if name in dist.__all__:
+      return True
+    return False
+
+  def ImportStubModule(self, name):
+    """Import the stub module replacement for the specified module."""
+    if name in sys.builtin_module_names:
+      name = 'py_%s' % name
+    module = __import__(dist.__name__, {}, {}, [name])
+    return getattr(module, name)
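# Illustrative sketch, not part of this changeset: built-in module names are
# remapped to their 'py_'-prefixed stubs in the dist package (e.g. 'imp'
# becomes 'py_imp'), while pure-Python modules such as 'ftplib' keep their own
# name.  'hook' below stands for an existing HardenedModulesHook instance.
if hook.StubModuleExists('ftplib'):
  ftplib_stub = hook.ImportStubModule('ftplib')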
+
   @Trace
   def FixModule(self, module):
     """Prunes and overrides restricted module attributes.
@@ -1334,9 +1461,7 @@
     """
     module = self._imp.new_module(submodule_fullname)
 
-    if submodule_fullname in self._EMPTY_MODULES:
-      module.__file__ = self.EMPTY_MODULE_FILE
-    elif submodule_fullname == 'thread':
+    if submodule_fullname == 'thread':
       module.__dict__.update(self._dummy_thread.__dict__)
       module.__name__ = 'thread'
     elif submodule_fullname == 'cPickle':
@@ -1345,6 +1470,8 @@
     elif submodule_fullname == 'os':
       module.__dict__.update(self._os.__dict__)
       self._module_dict['os.path'] = module.path
+    elif self.StubModuleExists(submodule_fullname):
+      module = self.ImportStubModule(submodule_fullname)
     else:
       source_file, pathname, description = self.FindModuleRestricted(submodule, submodule_fullname, search_path)
       module = self.LoadModuleRestricted(submodule_fullname,
@@ -2004,7 +2131,7 @@
           path = entry.static_dir
           if path[-1] == '/':
             path = path[:-1]
-          regex = re.escape(path) + r'/(.*)'
+          regex = re.escape(path + os.path.sep) + r'(.*)'
 
         try:
           path_re = re.compile(regex)
@@ -2021,6 +2148,20 @@
 
         self._patterns.append((path_re, entry.mime_type, expiration))
 
+  def IsStaticFile(self, path):
+    """Tests if the given path points to a "static" file.
+
+    Args:
+      path: String containing the file's path relative to the app.
+
+    Returns:
+      Boolean, True if the file was configured to be static.
+    """
+    for (path_re, _, _) in self._patterns:
+      if path_re.match(path):
+        return True
+    return False
+
   def GetMimeType(self, path):
     """Returns the mime type that we should use when serving the specified file.
 
@@ -2143,8 +2284,25 @@
     ])
 
 
-def RewriteResponse(response_file):
-  """Interprets server-side headers and adjusts the HTTP response accordingly.
+def IgnoreHeadersRewriter(status_code, status_message, headers, body):
+  """Ignore specific response headers.
+
+  Certain response headers cannot be modified by an Application.  For a
+  complete list of these headers please see:
+
+    http://code.google.com/appengine/docs/webapp/responseclass.html#Disallowed_HTTP_Response_Headers
+
+  This rewriter simply removes those headers.
+  """
+  for h in _IGNORE_RESPONSE_HEADERS:
+    if h in headers:
+      del headers[h]
+
+  return status_code, status_message, headers, body
+
+
+def ParseStatusRewriter(status_code, status_message, headers, body):
+  """Parse status header, if it exists.
 
   Handles the server-side 'status' header, which instructs the server to change
   the HTTP response code accordingly. Handles the 'location' header, which
@@ -2154,12 +2312,113 @@
 
   If the 'status' header supplied by the client is invalid, this method will
   set the response to a 500 with an error message as content.
+  """
+  location_value = headers.getheader('location')
+  status_value = headers.getheader('status')
+  if status_value:
+    response_status = status_value
+    del headers['status']
+  elif location_value:
+    response_status = '%d Redirecting' % httplib.FOUND
+  else:
+    return status_code, status_message, headers, body
+
+  status_parts = response_status.split(' ', 1)
+  status_code, status_message = (status_parts + [''])[:2]
+  try:
+    status_code = int(status_code)
+  except ValueError:
+    status_code = 500
+    body = cStringIO.StringIO('Error: Invalid "status" header value returned.')
+
+  return status_code, status_message, headers, body
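# Illustrative sketch, not part of this changeset: a 'Status: 404 Not Found'
# header from the application splits into a code and a message, while a
# malformed value falls back to a 500 response.  This assumes the module
# context above, where mimetools and cStringIO are already imported.
raw = cStringIO.StringIO('Status: 404 Not Found\r\n\r\nbody text')
code, message, hdrs, body = ParseStatusRewriter(
    200, 'Good to go', mimetools.Message(raw), raw)
# code == 404, message == 'Not Found', and 'status' has been removed from hdrs.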
+
+
+def CacheRewriter(status_code, status_message, headers, body):
+  """Update the cache header."""
+  if not 'Cache-Control' in headers:
+    headers['Cache-Control'] = 'no-cache'
+  return status_code, status_message, headers, body
+
+
+def ContentLengthRewriter(status_code, status_message, headers, body):
+  """Rewrite the Content-Length header.
+
+  Even though Content-Length is not a user modifiable header, App Engine
+  sends a correct Content-Length to the user based on the actual response.
+  """
+  current_position = body.tell()
+  body.seek(0, 2)
+
+  headers['Content-Length'] = str(body.tell() - current_position)
+  body.seek(current_position)
+  return status_code, status_message, headers, body
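# Illustrative sketch, not part of this changeset: only the bytes from the
# body's current position to the end of the stream are counted, so headers
# already consumed from the same stream do not inflate Content-Length.
stream = cStringIO.StringIO('X-Example: ignored\r\n\r\nhello world')
hdrs = mimetools.Message(stream)
_, _, hdrs, stream = ContentLengthRewriter(200, 'Good to go', hdrs, stream)
# hdrs['Content-Length'] == '11' and the stream position is unchanged.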
+
+
+def CreateResponseRewritersChain():
+  """Create the default response rewriter chain.
+
+  A response rewriter is a function that gets a final chance to change part
+  of the dev_appserver's response.  Unlike a dispatcher, a rewriter is called
+  after every request has been handled, regardless of which dispatcher was
+  used.
+
+  The order in which rewriters are registered is the order in which they are
+  used to rewrite the response.  Modifications from earlier rewriters are
+  used as input to later rewriters.
+
+  A response rewriter may rewrite the response in any way.  The function can
+  return modified values or the original values it was passed.
+
+  A rewriter function has the following parameters and return values:
+
+    Args:
+      status_code: Status code of response from dev_appserver or previous
+        rewriter.
+      status_message: Text corresponding to status code.
+      headers: mimetools.Message instance with parsed headers.  NOTE: These
+        headers may contain their own 'status' field, but the default
+        dev_appserver implementation will remove it.  Future rewriters
+        should avoid re-introducing the status field and return new codes
+        instead.
+      body: File object containing the body of the response.  The position of
+        this file may not be at the start of the file.  Any content before the
+        file's position is considered not to be part of the final body.
+
+    Returns:
+      status_code: Rewritten status code or original.
+      status_message: Rewritten message or original.
+      headers: Rewritten/modified headers or original.
+      body: Rewritten/modified body or original.
+
+  Returns:
+    List of response rewriters.
+  """
+  return [IgnoreHeadersRewriter,
+          ParseStatusRewriter,
+          CacheRewriter,
+          ContentLengthRewriter,
+  ]
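# Illustrative sketch, not part of this changeset: a custom rewriter follows
# the same four-argument contract described above and can simply be appended
# to the default chain.  The X-Dev-Appserver header name is hypothetical.
def ServerHeaderRewriter(status_code, status_message, headers, body):
  """Tags every response with a marker header."""
  headers['X-Dev-Appserver'] = 'yes'
  return status_code, status_message, headers, body

rewriter_chain = CreateResponseRewritersChain() + [ServerHeaderRewriter]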
+
+
+def RewriteResponse(response_file, response_rewriters=None):
+  """Allows final rewrite of dev_appserver response.
+
+  This function receives the unparsed HTTP response from the application
+  or internal handler, parses out the basic structure and feeds that structure
+  in to a chain of response rewriters.
+
+  It also makes sure the final HTTP headers are properly terminated.
+
+  For more about response rewriters, please see documentation for
+  CreateResponseRewritersChain.
 
   Args:
     response_file: File-like object containing the full HTTP response including
       the response code, all headers, and the request body.
-    gmtime: Function which returns current time in a format matching standard
-      time.gmtime().
+    response_rewriters: A list of response rewriters.  If None is provided, a
+      new chain is created using CreateResponseRewritersChain.
 
   Returns:
     Tuple (status_code, status_message, header, body) where:
@@ -2170,36 +2429,19 @@
         a trailing new-line (CRLF).
       body: String containing the body of the response.
   """
+  if response_rewriters is None:
+    response_rewriters = CreateResponseRewritersChain()
+
+  status_code = 200
+  status_message = 'Good to go'
   headers = mimetools.Message(response_file)
 
-  for h in _IGNORE_RESPONSE_HEADERS:
-    if h in headers:
-      del headers[h]
-
-  response_status = '%d Good to go' % httplib.OK
-
-  location_value = headers.getheader('location')
-  status_value = headers.getheader('status')
-  if status_value:
-    response_status = status_value
-    del headers['status']
-  elif location_value:
-    response_status = '%d Redirecting' % httplib.FOUND
-
-  if not 'Cache-Control' in headers:
-    headers['Cache-Control'] = 'no-cache'
-
-  status_parts = response_status.split(' ', 1)
-  status_code, status_message = (status_parts + [''])[:2]
-  try:
-    status_code = int(status_code)
-  except ValueError:
-    status_code = 500
-    body = 'Error: Invalid "status" header value returned.'
-  else:
-    body = response_file.read()
-
-  headers['Content-Length'] = str(len(body))
+  for response_rewriter in response_rewriters:
+    status_code, status_message, headers, response_file = response_rewriter(
+        status_code,
+        status_message,
+        headers,
+        response_file)
 
   header_list = []
   for header in headers.headers:
@@ -2208,7 +2450,7 @@
     header_list.append(header)
 
   header_data = '\r\n'.join(header_list) + '\r\n'
-  return status_code, status_message, header_data, body
+  return status_code, status_message, header_data, response_file.read()
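# Illustrative sketch, not part of this changeset: feeding a raw CGI-style
# response through RewriteResponse with the default rewriter chain.
raw_response = cStringIO.StringIO(
    'Status: 302 Found\r\nLocation: /other\r\n\r\n')
code, message, header_data, body = RewriteResponse(raw_response)
# code == 302, header_data includes the Location and Cache-Control headers
# followed by a trailing CRLF, and body is the (empty) remainder of the stream.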
 
 
 class ModuleManager(object):
@@ -2245,7 +2487,7 @@
       __file__ attribute, None will be returned.
       """
     module_file = getattr(module, '__file__', None)
-    if not module_file or module_file == HardenedModulesHook.EMPTY_MODULE_FILE:
+    if module_file is None:
       return None
 
     source_file = module_file[:module_file.rfind('py') + 2]
@@ -2309,7 +2551,9 @@
     template_module.template_cache.clear()
 
 
-def CreateRequestHandler(root_path, login_url, require_indexes=False,
+def CreateRequestHandler(root_path,
+                         login_url,
+                         require_indexes=False,
                          static_caching=True):
   """Creates a new BaseHTTPRequestHandler sub-class for use with the Python
   BaseHTTPServer module's HTTP server.
@@ -2359,6 +2603,8 @@
 
     config_cache = application_config_cache
 
+    rewriter_chain = CreateResponseRewritersChain()
+
     def __init__(self, *args, **kwargs):
       """Initializer.
 
@@ -2432,6 +2678,10 @@
         config, explicit_matcher = LoadAppConfig(root_path, self.module_dict,
                                                  cache=self.config_cache,
                                                  static_caching=static_caching)
+        if config.api_version != API_VERSION:
+          logging.error("API versions cannot be switched dynamically: %r != %r"
+                        % (config.api_version, API_VERSION))
+          sys.exit(1)
         env_dict['CURRENT_VERSION_ID'] = config.version + ".1"
         env_dict['APPLICATION_ID'] = config.application
         dispatcher = MatcherDispatcher(login_url,
@@ -2465,7 +2715,7 @@
         outfile.flush()
         outfile.seek(0)
 
-        status_code, status_message, header_data, body = RewriteResponse(outfile)
+        status_code, status_message, header_data, body = RewriteResponse(outfile, self.rewriter_chain)
 
         runtime_response_size = len(outfile.getvalue())
         if runtime_response_size > MAX_RUNTIME_RESPONSE_SIZE:
@@ -2582,8 +2832,13 @@
   url_matcher = create_url_matcher()
   path_adjuster = create_path_adjuster(root_path)
   cgi_dispatcher = create_cgi_dispatcher(module_dict, root_path, path_adjuster)
+  static_file_config_matcher = StaticFileConfigMatcher(url_map_list,
+                                                       path_adjuster,
+                                                       default_expiration)
   file_dispatcher = create_file_dispatcher(path_adjuster,
-      StaticFileConfigMatcher(url_map_list, path_adjuster, default_expiration))
+                                           static_file_config_matcher)
+
+  FakeFile.SetStaticFileConfigMatcher(static_file_config_matcher)
 
   for url_map in url_map_list:
     admin_only = url_map.login == appinfo.LOGIN_ADMIN
@@ -2687,6 +2942,8 @@
                                  module_dict,
                                  default_expiration)
 
+        FakeFile.SetSkippedFiles(config.skip_files)
+
         if cache is not None:
           cache.path = appinfo_path
           cache.config = config
@@ -2868,9 +3125,15 @@
                  serve_address='',
                  require_indexes=False,
                  static_caching=True,
-                 python_path_list=sys.path):
+                 python_path_list=sys.path,
+                 sdk_dir=os.path.dirname(os.path.dirname(google.__file__))):
   """Creates an new HTTPServer for an application.
 
+  The sdk_dir argument must point to the directory that holds all of the SDK's
+  code, so that the sandboxing of module access works for any and all SDK
+  code.  While this is typically where the 'google' package lives, it can be
+  in another location because of API version support.
+
   Args:
     root_path: String containing the path to the root directory of the
       application where the app.yaml file is.
@@ -2882,6 +3145,7 @@
     require_indexes: True if index.yaml is read-only gospel; default False.
     static_caching: True if browser caching of static files should be allowed.
     python_path_list: Used for dependency injection.
+    sdk_dir: Directory where the SDK is stored.
 
   Returns:
     Instance of BaseHTTPServer.HTTPServer that's ready to start accepting.
@@ -2889,12 +3153,14 @@
   absolute_root_path = os.path.realpath(root_path)
 
   SetupTemplates(template_dir)
-  FakeFile.SetAllowedPaths([absolute_root_path,
-                            os.path.dirname(os.path.dirname(google.__file__)),
+  FakeFile.SetAllowedPaths(absolute_root_path,
+                           [sdk_dir,
                             template_dir])
 
-  handler_class = CreateRequestHandler(absolute_root_path, login_url,
-                                       require_indexes, static_caching)
+  handler_class = CreateRequestHandler(absolute_root_path,
+                                       login_url,
+                                       require_indexes,
+                                       static_caching)
 
   if absolute_root_path not in python_path_list:
     python_path_list.insert(0, absolute_root_path)
--- a/thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py	Thu Feb 12 12:30:36 2009 +0000
@@ -65,14 +65,24 @@
 import getopt
 import logging
 import os
+import re
 import sys
 import traceback
 import tempfile
 
-from google.appengine.api import yaml_errors
-from google.appengine.tools import appcfg
-from google.appengine.tools import appengine_rpc
-from google.appengine.tools import dev_appserver
+
+def SetGlobals():
+  """Set various global variables involving the 'google' package.
+
+  This function should not be called until sys.path has been properly set.
+  """
+  global yaml_errors, appcfg, appengine_rpc, dev_appserver, os_compat
+  from google.appengine.api import yaml_errors
+  from google.appengine.tools import appcfg
+  from google.appengine.tools import appengine_rpc
+  from google.appengine.tools import dev_appserver
+  from google.appengine.tools import os_compat
+
 
 
 DEFAULT_ADMIN_CONSOLE_SERVER = 'appengine.google.com'
@@ -98,9 +108,13 @@
 ARG_STATIC_CACHING = 'static_caching'
 ARG_TEMPLATE_DIR = 'template_dir'
 
-
-BASE_PATH = os.path.abspath(
-  os.path.join(os.path.dirname(dev_appserver.__file__), '../../../'))
+SDK_PATH = os.path.dirname(
+             os.path.dirname(
+               os.path.dirname(
+                 os.path.dirname(os_compat.__file__)
+               )
+             )
+           )
 
 DEFAULT_ARGS = {
   ARG_PORT: 8080,
@@ -112,7 +126,7 @@
   ARG_LOGIN_URL: '/_ah/login',
   ARG_CLEAR_DATASTORE: False,
   ARG_REQUIRE_INDEXES: False,
-  ARG_TEMPLATE_DIR: os.path.join(BASE_PATH, 'templates'),
+  ARG_TEMPLATE_DIR: os.path.join(SDK_PATH, 'templates'),
   ARG_SMTP_HOST: '',
   ARG_SMTP_PORT: 25,
   ARG_SMTP_USER: '',
@@ -126,6 +140,74 @@
   ARG_STATIC_CACHING: True,
 }
 
+API_PATHS = {'1':
+             {'google': (),
+              'antlr3': ('lib', 'antlr3'),
+              'django': ('lib', 'django'),
+              'webob': ('lib', 'webob'),
+              'yaml': ('lib', 'yaml', 'lib'),
+              }
+             }
+
+DEFAULT_API_VERSION = '1'
+
+API_PATHS['test'] = API_PATHS[DEFAULT_API_VERSION].copy()
+API_PATHS['test']['_test'] = ('nonexistent', 'test', 'path')
+
+
+def SetPaths(app_config_path):
+  """Set the interpreter to use the specified API version.
+
+  The app.yaml file is scanned for the api_version field and its value is
+  extracted. With that information, the paths in API_PATHS are added to the
+  front of sys.path to make sure that they take precedence over any other
+  paths to older versions of a package. All modules for each package set are
+  cleared out of sys.modules to make sure only the newest version is used.
+
+  Args:
+    app_config_path: Path to the app.yaml file.
+
+  Returns:
+    The api_version value found in the configuration file.
+  """
+  api_version_re = re.compile(r'api_version:\s*(?P<api_version>[\w.]{1,32})')
+  api_version = None
+  app_config_file = open(app_config_path, 'r')
+  try:
+    for line in app_config_file:
+      re_match = api_version_re.match(line)
+      if re_match:
+        api_version = re_match.group('api_version')
+        break
+  finally:
+    app_config_file.close()
+
+  if api_version is None:
+    logging.error("Application configuration file missing an 'api_version' "
+                  "value:\n%s" % app_config_path)
+    sys.exit(1)
+  if api_version not in API_PATHS:
+    logging.error("Value of %r for 'api_version' from the application "
+                  "configuration file is not valid:\n%s" %
+                    (api_version, app_config_path))
+    sys.exit(1)
+
+  if api_version == DEFAULT_API_VERSION:
+    return DEFAULT_API_VERSION
+
+  sdk_path = os.path.dirname(
+      os.path.dirname(
+        os.path.dirname(
+          os.path.dirname(os_compat.__file__)
+          )
+        )
+      )
+  for pkg_name, path_parts in API_PATHS[api_version].iteritems():
+    for name in sys.modules.keys():
+      if name == pkg_name or name.startswith('%s.' % pkg_name):
+        del sys.modules[name]
+    pkg_path = os.path.join(sdk_path, *path_parts)
+    sys.path.insert(0, pkg_path)
+
+  return api_version
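# Illustrative sketch, not part of this changeset: the api_version value is
# picked out of app.yaml with a simple regular expression rather than a full
# YAML parse, so only a top-level 'api_version: 1' style line is recognized.
import re

api_version_re = re.compile(r'api_version:\s*(?P<api_version>[\w.]{1,32})')
match = api_version_re.match('api_version: 1\n')
assert match and match.group('api_version') == '1'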
+
 
 def PrintUsageExit(code):
   """Prints usage information and exits with a status code.
@@ -205,10 +287,10 @@
       option_dict[ARG_ADDRESS] = value
 
     if option == '--datastore_path':
-      option_dict[ARG_DATASTORE_PATH] = value
+      option_dict[ARG_DATASTORE_PATH] = os.path.abspath(value)
 
     if option == '--history_path':
-      option_dict[ARG_HISTORY_PATH] = value
+      option_dict[ARG_HISTORY_PATH] = os.path.abspath(value)
 
     if option in ('-c', '--clear_datastore'):
       option_dict[ARG_CLEAR_DATASTORE] = True
@@ -241,10 +323,10 @@
       option_dict[ARG_SHOW_MAIL_BODY] = True
 
     if option == '--auth_domain':
-      dev_appserver.DEFAULT_ENV['AUTH_DOMAIN'] = value
+      option_dict['_DEFAULT_ENV_AUTH_DOMAIN'] = value
 
     if option == '--debug_imports':
-      dev_appserver.HardenedModulesHook.ENABLE_LOGGING = True
+      option_dict['_ENABLE_LOGGING'] = True
 
     if option == '--template_dir':
       option_dict[ARG_TEMPLATE_DIR] = value
@@ -291,6 +373,25 @@
     PrintUsageExit(1)
 
   root_path = args[0]
+  for suffix in ('yaml', 'yml'):
+    path = os.path.join(root_path, 'app.%s' % suffix)
+    if os.path.exists(path):
+      api_version = SetPaths(path)
+      break
+  else:
+    logging.error("Application configuration file not found in %s" % root_path)
+    return 1
+
+  SetGlobals()
+  dev_appserver.API_VERSION = api_version
+
+  if '_DEFAULT_ENV_AUTH_DOMAIN' in option_dict:
+    auth_domain = option_dict['_DEFAULT_ENV_AUTH_DOMAIN']
+    dev_appserver.DEFAULT_ENV['AUTH_DOMAIN'] = auth_domain
+  if '_ENABLE_LOGGING' in option_dict:
+    enable_logging = option_dict['_ENABLE_LOGGING']
+    dev_appserver.HardenedModulesHook.ENABLE_LOGGING = enable_logging
+
   log_level = option_dict[ARG_LOG_LEVEL]
   port = option_dict[ARG_PORT]
   datastore_path = option_dict[ARG_DATASTORE_PATH]
@@ -335,6 +436,7 @@
                                            login_url,
                                            port,
                                            template_dir,
+                                           sdk_dir=SDK_PATH,
                                            serve_address=serve_address,
                                            require_indexes=require_indexes,
                                            static_caching=static_caching)
@@ -359,3 +461,5 @@
 
 if __name__ == '__main__':
   sys.exit(main(sys.argv))
+else:
+  SetGlobals()
--- a/thirdparty/google_appengine/lib/antlr3/antlr3/__init__.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr3/__init__.py	Thu Feb 12 12:30:36 2009 +0000
@@ -138,7 +138,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-__version__ = '3.1'
+__version__ = '3.1.1'
 
 def version_str_to_tuple(version_str):
     import re
--- a/thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/PKG-INFO	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/antlr_python_runtime.egg-info/PKG-INFO	Thu Feb 12 12:30:36 2009 +0000
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: antlr-python-runtime
-Version: 3.1
+Version: 3.1.1
 Summary: Runtime package for ANTLR3
 Home-page: http://www.antlr.org/
 Author: Benjamin Niemann
--- a/thirdparty/google_appengine/lib/antlr3/setup.py	Thu Feb 12 10:24:37 2009 +0000
+++ b/thirdparty/google_appengine/lib/antlr3/setup.py	Thu Feb 12 12:30:36 2009 +0000
@@ -267,7 +267,7 @@
             
 
 setup(name='antlr_python_runtime',
-      version='3.1',
+      version='3.1.1',
       packages=['antlr3'],
 
       author="Benjamin Niemann",