Load /Users/solydzajs/Desktop/google_appengine into trunk/thirdparty/google_appengine.

author     Pawel Solyga <Pawel.Solyga@gmail.com>
date       Fri, 24 Apr 2009 14:16:00 +0000
changeset  2273 e4cb9c53db3e
parent     2272 26491ee91e33
child      2274 1885cda38bb7
thirdparty/google_appengine/RELEASE_NOTES
thirdparty/google_appengine/VERSION
thirdparty/google_appengine/google/appengine/api/appinfo.py
thirdparty/google_appengine/google/appengine/api/croninfo.py
thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py
thirdparty/google_appengine/google/appengine/api/datastore_types.py
thirdparty/google_appengine/google/appengine/api/images/__init__.py
thirdparty/google_appengine/google/appengine/api/images/images_service_pb.py
thirdparty/google_appengine/google/appengine/api/images/images_stub.py
thirdparty/google_appengine/google/appengine/api/mail.py
thirdparty/google_appengine/google/appengine/api/memcache/__init__.py
thirdparty/google_appengine/google/appengine/api/memcache/memcache_service_pb.py
thirdparty/google_appengine/google/appengine/api/memcache/memcache_stub.py
thirdparty/google_appengine/google/appengine/api/namespace_manager/__init__.py
thirdparty/google_appengine/google/appengine/api/urlfetch_service_pb.py
thirdparty/google_appengine/google/appengine/api/urlfetch_stub.py
thirdparty/google_appengine/google/appengine/api/users.py
thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py
thirdparty/google_appengine/google/appengine/datastore/entity_pb.py
thirdparty/google_appengine/google/appengine/dist/py_imp.py
thirdparty/google_appengine/google/appengine/dist/py_zipimport.py
thirdparty/google_appengine/google/appengine/ext/admin/__init__.py
thirdparty/google_appengine/google/appengine/ext/gql/__init__.py
thirdparty/google_appengine/google/appengine/ext/remote_api/handler.py
thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_stub.py
thirdparty/google_appengine/google/appengine/ext/webapp/__init__.py
thirdparty/google_appengine/google/appengine/tools/appcfg.py
thirdparty/google_appengine/google/appengine/tools/appengine_rpc.py
thirdparty/google_appengine/google/appengine/tools/bulkloader.py
thirdparty/google_appengine/google/appengine/tools/dev_appserver.py
thirdparty/google_appengine/google/appengine/tools/dev_appserver_login.py
thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py
thirdparty/google_appengine/google/appengine/tools/os_compat.py
--- a/thirdparty/google_appengine/RELEASE_NOTES	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/RELEASE_NOTES	Fri Apr 24 14:16:00 2009 +0000
@@ -1,17 +1,49 @@
 Copyright 2008 Google Inc.
 All rights reserved.
 
-App Engine SDK - Release Notes
+App Engine Python SDK - Release Notes
+
+Version 1.2.1 - April 13, 2009
+=============================
+
+  - Stable, unique IDs for User objects. The Users service now
+    provides a unique user_id for each user that stays the same even
+    if a user changes her email address.
+      http://code.google.com/p/googleappengine/issues/detail?id=1019
+  - The Images API now supports compositing images and calculating
+    a color histogram for an image.
+  - New allowed mail attachment types: ics, vcf
+      http://code.google.com/p/googleappengine/issues/detail?id=494
+  - Urlfetch requests can now set the User-Agent header.
+      http://code.google.com/p/googleappengine/issues/detail?id=342
+  - An App Engine-specific version of the Python PyCrypto cryptography
+    library is now available. Learn more at
+      http://code.google.com/appengine/docs/python/tools/libraries.html
+  - The bulk loader configuration format has changed to allow non-CSV
+    input. This change is not backwards compatible, so you will need to
+    update your code.
+    An early release of the bulk downloader is also now available in
+    bulkloader.py. Learn more about these changes at:
+      http://code.google.com/appengine/docs/python/tools/uploadingdata.html
+  - Fixed parsing of unicode GQL queries.
+      http://code.google.com/p/googleappengine/issues/detail?id=1105
+  - Fixed dev_appserver security restrictions for os.path
+      http://code.google.com/p/googleappengine/issues/detail?id=1068
+  - Fixed Reply-To header set in emails sent from dev_appserver.
+      http://code.google.com/p/googleappengine/issues/detail?id=1017
+
+
 
 Version 1.2.0 - March 24, 2009
 ==============================
   - Cron support. Appcfg.py will upload the schedule to App Engine.
       The dev_appserver console at /_ah/admin describes your schedule but does
       not automatically run scheduled jobs. Learn more at
-      http://code.google.com/appengine/docs/python/config/cron.html
   - New allow_skipped_files flag in dev_appserver to allow it to read files
     which are not available in App Engine.
-        http://code.google.com/p/googleappengine/issues/detail?id=550
+      http://code.google.com/p/googleappengine/issues/detail?id=550
+  - New upload_data command in appcfg to run the bulk uploader.
+      http://code.google.com/appengine/docs/python/tools/uploadingdata.html
 
 Version 1.1.9 - February 2, 2009
 ================================
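
The 1.2.1 items above describe the new APIs without showing them; a minimal usage
sketch of the stable user_id and the Urlfetch User-Agent support follows. This is an
illustration only, assuming code running inside an App Engine request handler; the
UserRecord kind, the URL and the agent string are invented for the example.

    from google.appengine.api import urlfetch, users
    from google.appengine.ext import db


    class UserRecord(db.Model):
      # Keyed by the stable user_id so a later email-address change does
      # not orphan the record.
      user = db.UserProperty()


    def record_current_user():
      user = users.get_current_user()
      if user is None:
        return None
      # user.user_id() stays the same even if the user's email changes.
      record = UserRecord(key_name=user.user_id(), user=user)
      record.put()
      return record


    def fetch_with_agent(url):
      # As of 1.2.1 the User-Agent header set here is passed through to
      # the target instead of being replaced by the runtime.
      return urlfetch.fetch(url, headers={'User-Agent': 'my-app/1.0'})
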
--- a/thirdparty/google_appengine/VERSION	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/VERSION	Fri Apr 24 14:16:00 2009 +0000
@@ -1,3 +1,3 @@
-release: "1.2.0"
-timestamp: 1236791960
+release: "1.2.1"
+timestamp: 1238791978
 api_versions: ['1']
--- a/thirdparty/google_appengine/google/appengine/api/appinfo.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/appinfo.py	Fri Apr 24 14:16:00 2009 +0000
@@ -70,6 +70,8 @@
 SECURE_HTTPS = 'always'
 SECURE_HTTP_OR_HTTPS = 'optional'
 
+REQUIRE_MATCHING_FILE = 'require_matching_file'
+
 DEFAULT_SKIP_FILES = (r"^(.*/)?("
                       r"(app\.yaml)|"
                       r"(app\.yml)|"
@@ -199,13 +201,16 @@
 
 
     HANDLER_SCRIPT: validation.Optional(_FILES_REGEX),
+
+    REQUIRE_MATCHING_FILE: validation.Optional(bool),
   }
 
   COMMON_FIELDS = set([URL, LOGIN, SECURE])
 
   ALLOWED_FIELDS = {
-    HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION),
-    HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION),
+    HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION,
+                           REQUIRE_MATCHING_FILE),
+    HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION, REQUIRE_MATCHING_FILE),
     HANDLER_SCRIPT: (),
   }
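
The new require_matching_file field added above lets a static handler fall through
when no file on disk matches the request. A small sketch of a handler entry using it,
assuming appinfo.URLMap accepts handler fields as keyword arguments; the URL and
directory are illustrative:

    from google.appengine.api import appinfo

    # Serve /img from the img directory, but only when the requested file
    # actually exists; otherwise the request falls through to later handlers.
    handler = appinfo.URLMap(url='/img',
                             static_dir='img',
                             require_matching_file=True)
    handler.CheckInitialized()  # runs the validation.Optional(bool) check
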
 
--- a/thirdparty/google_appengine/google/appengine/api/croninfo.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/croninfo.py	Fri Apr 24 14:16:00 2009 +0000
@@ -79,9 +79,9 @@
     except IOError:
       return value
     except:
-      e, v, t = sys.exc_info()
-      logging.warning("pytz raised an unexpected error: %s.\n" % (v) +
-                      "Traceback:\n" + "\n".join(traceback.format_tb(t)))
+      unused_e, v, t = sys.exc_info()
+      logging.warning('pytz raised an unexpected error: %s.\n' % (v) +
+                      'Traceback:\n' + '\n'.join(traceback.format_tb(t)))
       raise
     return value
 
--- a/thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/datastore_file_stub.py	Fri Apr 24 14:16:00 2009 +0000
@@ -37,6 +37,7 @@
 
 import datetime
 import logging
+import md5
 import os
 import struct
 import sys
@@ -145,7 +146,8 @@
                datastore_file,
                history_file,
                require_indexes=False,
-               service_name='datastore_v3'):
+               service_name='datastore_v3',
+               trusted=False):
     """Constructor.
 
     Initializes and loads the datastore from the backing files, if they exist.
@@ -159,6 +161,8 @@
       require_indexes: bool, default False.  If True, composite indexes must
           exist in index.yaml for queries that need them.
       service_name: Service name expected for all calls.
+      trusted: bool, default False.  If True, this stub allows an app to
+        access the data of another app.
     """
     super(DatastoreFileStub, self).__init__(service_name)
 
@@ -167,6 +171,7 @@
     self.__app_id = app_id
     self.__datastore_file = datastore_file
     self.__history_file = history_file
+    self.SetTrusted(trusted)
 
     self.__entities = {}
 
@@ -207,6 +212,31 @@
     self.__query_history = {}
     self.__schema_cache = {}
 
+  def SetTrusted(self, trusted):
+    """Set/clear the trusted bit in the stub.
+
+    This bit indicates that the app calling the stub is trusted. A
+    trusted app can write to datastores of other apps.
+
+    Args:
+      trusted: boolean.
+    """
+    self.__trusted = trusted
+
+  def __ValidateAppId(self, app_id):
+    """Verify that this is the stub for app_id.
+
+    Args:
+      app_id: An application ID.
+
+    Raises:
+      datastore_errors.BadRequestError: if this is not the stub for app_id.
+    """
+    if not self.__trusted and app_id != self.__app_id:
+      raise datastore_errors.BadRequestError(
+          'app %s cannot access app %s\'s data' % (self.__app_id, app_id))
+
+
   def _AppKindForKey(self, key):
     """ Get (app, kind) tuple from given key.
 
@@ -336,7 +366,7 @@
           return pickle.load(open(filename, 'rb'))
         else:
           logging.warning('Could not read datastore data from %s', filename)
-      except (AttributeError, LookupError, NameError, TypeError,
+      except (AttributeError, LookupError, ImportError, NameError, TypeError,
               ValueError, struct.error, pickle.PickleError), e:
         raise datastore_errors.InternalError(
           'Could not read data from %s. Try running with the '
@@ -394,8 +424,18 @@
   def _Dynamic_Put(self, put_request, put_response):
     clones = []
     for entity in put_request.entity_list():
+      self.__ValidateAppId(entity.key().app())
+
       clone = entity_pb.EntityProto()
       clone.CopyFrom(entity)
+
+      for property in clone.property_list():
+        if property.value().has_uservalue():
+          uid = md5.new(property.value().uservalue().email().lower()).digest()
+          uid = '1' + ''.join(['%02d' % ord(x) for x in uid])[:20]
+          property.mutable_value().mutable_uservalue().set_obfuscated_gaiaid(
+              uid)
+
       clones.append(clone)
 
       assert clone.has_key()
@@ -433,22 +473,24 @@
 
   def _Dynamic_Get(self, get_request, get_response):
     for key in get_request.key_list():
-        app_kind = self._AppKindForKey(key)
+      self.__ValidateAppId(key.app())
+      app_kind = self._AppKindForKey(key)
 
-        group = get_response.add_entity()
-        try:
-          entity = self.__entities[app_kind][key].protobuf
-        except KeyError:
-          entity = None
+      group = get_response.add_entity()
+      try:
+        entity = self.__entities[app_kind][key].protobuf
+      except KeyError:
+        entity = None
 
-        if entity:
-          group.mutable_entity().CopyFrom(entity)
+      if entity:
+        group.mutable_entity().CopyFrom(entity)
 
 
   def _Dynamic_Delete(self, delete_request, delete_response):
     self.__entities_lock.acquire()
     try:
       for key in delete_request.key_list():
+        self.__ValidateAppId(key.app())
         app_kind = self._AppKindForKey(key)
         try:
           del self.__entities[app_kind][key]
@@ -472,6 +514,9 @@
     else:
       self.__tx_lock.release()
 
+    app = query.app()
+    self.__ValidateAppId(app)
+
     if query.has_offset() and query.offset() > _MAX_QUERY_OFFSET:
       raise apiproxy_errors.ApplicationError(
           datastore_pb.Error.BAD_REQUEST, 'Too big query offset.')
@@ -485,8 +530,6 @@
           ('query is too large. may not have more than %s filters'
            ' + sort orders ancestor total' % _MAX_QUERY_COMPONENTS))
 
-    app = query.app()
-
     if self.__require_indexes:
       required, kind, ancestor, props, num_eq_filters = datastore_index.CompositeIndexForQuery(query)
       if required:
@@ -701,6 +744,7 @@
                                              'Cursor %d not found' % cursor)
 
     count = next_request.count()
+
     results_pb = [r._ToPb() for r in results[:count]]
     query_result.result_list().extend(results_pb)
     del results[:count]
@@ -708,6 +752,7 @@
     query_result.set_more_results(len(results) > 0)
 
   def _Dynamic_Count(self, query, integer64proto):
+    self.__ValidateAppId(query.app())
     query_result = datastore_pb.QueryResult()
     self._Dynamic_RunQuery(query, query_result)
     cursor = query_result.cursor().cursor()
@@ -759,6 +804,7 @@
       minfloat = -1e300000
 
     app_str = app_str.value()
+    self.__ValidateAppId(app_str)
 
     kinds = []
 
@@ -816,6 +862,7 @@
       schema.add_kind().CopyFrom(kind_pb)
 
   def _Dynamic_CreateIndex(self, index, id_response):
+    self.__ValidateAppId(index.app_id())
     if index.id() != 0:
       raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
                                              'New index id must be 0.')
@@ -843,10 +890,12 @@
       self.__indexes_lock.release()
 
   def _Dynamic_GetIndices(self, app_str, composite_indices):
+    self.__ValidateAppId(app_str.value())
     composite_indices.index_list().extend(
       self.__indexes.get(app_str.value(), []))
 
   def _Dynamic_UpdateIndex(self, index, void):
+    self.__ValidateAppId(index.app_id())
     stored_index = self.__FindIndex(index)
     if not stored_index:
       raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
@@ -866,6 +915,7 @@
       self.__indexes_lock.release()
 
   def _Dynamic_DeleteIndex(self, index, void):
+    self.__ValidateAppId(index.app_id())
     stored_index = self.__FindIndex(index)
     if not stored_index:
       raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
@@ -888,6 +938,7 @@
       entity_pb.CompositeIndex, if it exists; otherwise None
     """
     app = index.app_id()
+    self.__ValidateAppId(app)
     if app in self.__indexes:
       for stored_index in self.__indexes[app]:
         if index.definition() == stored_index.definition():
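
The trusted flag and __ValidateAppId added above gate cross-application access in the
stub. A minimal sketch of registering a trusted stub in a local test harness, assuming
the usual apiproxy_stub_map registration pattern; the app id and file paths are
illustrative:

    from google.appengine.api import apiproxy_stub_map, datastore_file_stub

    apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()

    # With trusted=True the stub may touch entities whose keys belong to
    # other app ids; with the default False, __ValidateAppId raises
    # BadRequestError for any key whose app() is not 'my-app'.
    stub = datastore_file_stub.DatastoreFileStub(
        'my-app',
        '/tmp/test.datastore',
        '/tmp/test.datastore.history',
        trusted=True)
    apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', stub)
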
--- a/thirdparty/google_appengine/google/appengine/api/datastore_types.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/datastore_types.py	Fri Apr 24 14:16:00 2009 +0000
@@ -1171,6 +1171,10 @@
       value.auth_domain().encode('utf-8'))
   pbvalue.mutable_uservalue().set_gaiaid(0)
 
+  if value.user_id() is not None:
+    pbvalue.mutable_uservalue().set_obfuscated_gaiaid(
+        value.user_id().encode('utf-8'))
+
 
 def PackKey(name, value, pbvalue):
   """Packs a reference property into a entity_pb.PropertyValue.
@@ -1368,7 +1372,11 @@
   elif pbval.has_uservalue():
     email = unicode(pbval.uservalue().email().decode('utf-8'))
     auth_domain = unicode(pbval.uservalue().auth_domain().decode('utf-8'))
-    value = users.User(email=email, _auth_domain=auth_domain)
+    obfuscated_gaiaid = pbval.uservalue().obfuscated_gaiaid().decode('utf-8')
+    obfuscated_gaiaid = unicode(obfuscated_gaiaid)
+    value = users.User(email=email,
+                       _auth_domain=auth_domain,
+                       _user_id=obfuscated_gaiaid)
   else:
     value = None
 
@@ -1444,7 +1452,9 @@
   return _PROPERTY_TYPE_STRINGS[type_name]
 
 
-def PropertyValueFromString(type_, value_string, _auth_domain=None):
+def PropertyValueFromString(type_,
+                            value_string,
+                            _auth_domain=None):
   """Returns an instance of a property value given a type and string value.
 
   The reverse of this method is just str() and type() of the python value.
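
PackUser and FromPropertyPb above now carry the user_id through the obfuscated_gaiaid
field, so a stored User keeps its id across a save/load round trip. A small sketch of
the behaviour this enables, assuming users.User accepts the internal _user_id argument
exactly as FromPropertyPb calls it; the id value is made up:

    from google.appengine.api import users

    u = users.User(email='someone@example.com',
                   _auth_domain='gmail.com',
                   _user_id='18580476422013912411')
    # PackUser writes user_id() into uservalue.obfuscated_gaiaid and
    # FromPropertyPb restores it, so the id survives the datastore.
    assert u.user_id() == '18580476422013912411'
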
--- a/thirdparty/google_appengine/google/appengine/api/images/__init__.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/images/__init__.py	Fri Apr 24 14:16:00 2009 +0000
@@ -42,8 +42,24 @@
 
 OUTPUT_ENCODING_TYPES = frozenset([JPEG, PNG])
 
+TOP_LEFT = images_service_pb.CompositeImageOptions.TOP_LEFT
+TOP_CENTER = images_service_pb.CompositeImageOptions.TOP
+TOP_RIGHT = images_service_pb.CompositeImageOptions.TOP_RIGHT
+CENTER_LEFT = images_service_pb.CompositeImageOptions.LEFT
+CENTER_CENTER = images_service_pb.CompositeImageOptions.CENTER
+CENTER_RIGHT = images_service_pb.CompositeImageOptions.RIGHT
+BOTTOM_LEFT = images_service_pb.CompositeImageOptions.BOTTOM_LEFT
+BOTTOM_CENTER = images_service_pb.CompositeImageOptions.BOTTOM
+BOTTOM_RIGHT = images_service_pb.CompositeImageOptions.BOTTOM_RIGHT
+
+ANCHOR_TYPES = frozenset([TOP_LEFT, TOP_CENTER, TOP_RIGHT, CENTER_LEFT,
+                          CENTER_CENTER, CENTER_RIGHT, BOTTOM_LEFT,
+                          BOTTOM_CENTER, BOTTOM_RIGHT])
+
 MAX_TRANSFORMS_PER_REQUEST = 10
 
+MAX_COMPOSITES_PER_REQUEST = 16
+
 
 class Error(Exception):
   """Base error class for this module."""
@@ -296,7 +312,7 @@
       raise BadRequestError("At least one of width or height must be > 0.")
 
     if width > 4000 or height > 4000:
-      raise BadRequestError("Both width and height must be < 4000.")
+      raise BadRequestError("Both width and height must be <= 4000.")
 
     self._check_transform_limits()
 
@@ -424,7 +440,7 @@
 
     Raises:
       BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already
-      been requested for this image.
+        been requested for this image.
     """
     self._check_transform_limits()
     transform = images_service_pb.Transform()
@@ -511,6 +527,47 @@
       self._update_dimensions()
     return self._height
 
+  def histogram(self):
+    """Calculates the histogram of the image.
+
+    Returns: 3 256-element lists containing the number of occurrences of
+    each value of each color in the order RGB, as described at
+    http://en.wikipedia.org/wiki/Color_histogram for N = 256. I.e. the
+    first value of the first list contains the number of pixels with a red
+    value of 0, the second the number with a red value of 1, and so on.
+
+    Raises:
+      NotImageError when the image data given is not an image.
+      BadImageError when the image data given is corrupt.
+      LargeImageError when the image data given is too large to process.
+      Error when something unknown, but bad, happens.
+    """
+    request = images_service_pb.ImagesHistogramRequest()
+    response = images_service_pb.ImagesHistogramResponse()
+
+    request.mutable_image().set_content(self._image_data)
+    try:
+      apiproxy_stub_map.MakeSyncCall("images",
+                                     "Histogram",
+                                     request,
+                                     response)
+    except apiproxy_errors.ApplicationError, e:
+      if (e.application_error ==
+          images_service_pb.ImagesServiceError.NOT_IMAGE):
+        raise NotImageError()
+      elif (e.application_error ==
+            images_service_pb.ImagesServiceError.BAD_IMAGE_DATA):
+        raise BadImageError()
+      elif (e.application_error ==
+            images_service_pb.ImagesServiceError.IMAGE_TOO_LARGE):
+        raise LargeImageError()
+      else:
+        raise Error()
+    histogram = response.histogram()
+    return [histogram.red_list(),
+            histogram.green_list(),
+            histogram.blue_list()]
+
 
 def resize(image_data, width=0, height=0, output_encoding=PNG):
   """Resize a given image file maintaining the aspect ratio.
@@ -631,3 +688,140 @@
   image = Image(image_data)
   image.im_feeling_lucky()
   return image.execute_transforms(output_encoding=output_encoding)
+
+def composite(inputs, width, height, color=0, output_encoding=PNG):
+  """Composite one or more images onto a canvas.
+
+  Args:
+    inputs: a list of tuples (image_data, x_offset, y_offset, opacity, anchor)
+    where
+      image_data: str, source image data.
+      x_offset: x offset in pixels from the anchor position
+      y_offset: y offset in pixels from the anchor position
+      opacity: opacity of the image specified as a float in range [0.0, 1.0]
+      anchor: anchoring point from ANCHOR_TYPES. The anchor point of the image
+      is aligned with the same anchor point of the canvas. e.g. TOP_RIGHT would
+      place the top right corner of the image at the top right corner of the
+      canvas then apply the x and y offsets.
+    width: canvas width in pixels.
+    height: canvas height in pixels.
+    color: canvas background color encoded as a 32 bit unsigned int where each
+    color channel is represented by one byte in order ARGB.
+    output_encoding: a value from OUTPUT_ENCODING_TYPES.
+
+  Returns:
+      str, image data of the composited image.
+
+  Raises:
+    TypeError If width, height, color, x_offset or y_offset are not of type
+      int or long, or if opacity is not a float.
+    BadRequestError If more than MAX_COMPOSITES_PER_REQUEST compositions have
+      been requested, if the canvas width or height is greater than 4000 or
+      less than or equal to 0, if the color is invalid, or if for any
+      composition option the opacity is outside the range [0,1] or the
+      anchor is invalid.
+  """
+  if (not isinstance(width, (int, long)) or
+      not isinstance(height, (int, long)) or
+      not isinstance(color, (int, long))):
+    raise TypeError("Width, height and color must be integers.")
+  if output_encoding not in OUTPUT_ENCODING_TYPES:
+    raise BadRequestError("Output encoding type '%s' not in recognized set "
+                          "%s" % (output_encoding, OUTPUT_ENCODING_TYPES))
+
+  if not inputs:
+    raise BadRequestError("Must provide at least one input")
+  if len(inputs) > MAX_COMPOSITES_PER_REQUEST:
+    raise BadRequestError("A maximum of %d composition operations can be "
+                          "performed in a single request" %
+                          MAX_COMPOSITES_PER_REQUEST)
+
+  if width <= 0 or height <= 0:
+    raise BadRequestError("Width and height must be > 0.")
+  if width > 4000 or height > 4000:
+    raise BadRequestError("Width and height must be <= 4000.")
+
+  if color > 0xffffffff or color < 0:
+    raise BadRequestError("Invalid color")
+  if color >= 0x80000000:
+    color -= 0x100000000
+
+  image_map = {}
+
+  request = images_service_pb.ImagesCompositeRequest()
+  response = images_service_pb.ImagesTransformResponse()
+  for (image, x, y, opacity, anchor) in inputs:
+    if not image:
+      raise BadRequestError("Each input must include an image")
+    if (not isinstance(x, (int, long)) or
+        not isinstance(y, (int, long)) or
+        not isinstance(opacity, (float))):
+      raise TypeError("x_offset, y_offset must be integers and opacity must "
+                      "be a float")
+    if x > 4000 or x < -4000:
+      raise BadRequestError("xOffsets must be in range [-4000, 4000]")
+    if y > 4000 or y < -4000:
+      raise BadRequestError("yOffsets must be in range [-4000, 4000]")
+    if opacity < 0 or opacity > 1:
+      raise BadRequestError("Opacity must be in the range 0.0 to 1.0")
+    if anchor not in ANCHOR_TYPES:
+      raise BadRequestError("Anchor type '%s' not in recognized set %s" %
+                            (anchor, ANCHOR_TYPES))
+    if image not in image_map:
+      image_map[image] = request.image_size()
+      request.add_image().set_content(image)
+
+    option = request.add_options()
+    option.set_x_offset(x)
+    option.set_y_offset(y)
+    option.set_opacity(opacity)
+    option.set_anchor(anchor)
+    option.set_source_index(image_map[image])
+
+  request.mutable_canvas().mutable_output().set_mime_type(output_encoding)
+  request.mutable_canvas().set_width(width)
+  request.mutable_canvas().set_height(height)
+  request.mutable_canvas().set_color(color)
+
+  try:
+    apiproxy_stub_map.MakeSyncCall("images",
+                                   "Composite",
+                                   request,
+                                   response)
+  except apiproxy_errors.ApplicationError, e:
+    if (e.application_error ==
+        images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA):
+      raise BadRequestError()
+    elif (e.application_error ==
+          images_service_pb.ImagesServiceError.NOT_IMAGE):
+      raise NotImageError()
+    elif (e.application_error ==
+          images_service_pb.ImagesServiceError.BAD_IMAGE_DATA):
+      raise BadImageError()
+    elif (e.application_error ==
+          images_service_pb.ImagesServiceError.IMAGE_TOO_LARGE):
+      raise LargeImageError()
+    elif (e.application_error ==
+          images_service_pb.ImagesServiceError.UNSPECIFIED_ERROR):
+      raise TransformationError()
+    else:
+      raise Error()
+
+  return response.image().content()
+
+
+def histogram(image_data):
+  """Calculates the histogram of the given image.
+
+  Args:
+    image_data: str, source image data.
+  Returns: 3 256-element lists containing the number of occurrences of each
+  value of each color in the order RGB.
+
+  Raises:
+    NotImageError when the image data given is not an image.
+    BadImageError when the image data given is corrupt.
+    LargeImageError when the image data given is too large to process.
+    Error when something unknown, but bad, happens.
+  """
+  image = Image(image_data)
+  return image.histogram()
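
composite() and histogram() defined above are the public entry points for the new
Images features mentioned in the release notes. A minimal usage sketch, assuming the
local files photo.jpg and logo.png exist; the canvas size, offsets and opacity are
illustrative:

    from google.appengine.api import images

    # Any encoded image data works; these reads are just for the example.
    photo_bytes = open('photo.jpg', 'rb').read()
    logo_bytes = open('logo.png', 'rb').read()

    # Place the photo at the top-left of a 640x480 white canvas and stamp
    # a half-transparent logo ten pixels in from the bottom-right corner.
    result_png = images.composite(
        [(photo_bytes, 0, 0, 1.0, images.TOP_LEFT),
         (logo_bytes, -10, -10, 0.5, images.BOTTOM_RIGHT)],
        width=640, height=480, color=0xffffffff,
        output_encoding=images.PNG)

    # Three 256-element lists of per-channel pixel counts, in RGB order.
    red, green, blue = images.histogram(result_png)
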
--- a/thirdparty/google_appengine/google/appengine/api/images/images_service_pb.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/images/images_service_pb.py	Fri Apr 24 14:16:00 2009 +0000
@@ -941,5 +941,1026 @@
 
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
+class CompositeImageOptions(ProtocolBuffer.ProtocolMessage):
 
-__all__ = ['ImagesServiceError','ImagesServiceTransform','Transform','ImageData','OutputSettings','ImagesTransformRequest','ImagesTransformResponse']
+  TOP_LEFT     =    0
+  TOP          =    1
+  TOP_RIGHT    =    2
+  LEFT         =    3
+  CENTER       =    4
+  RIGHT        =    5
+  BOTTOM_LEFT  =    6
+  BOTTOM       =    7
+  BOTTOM_RIGHT =    8
+
+  _ANCHOR_NAMES = {
+    0: "TOP_LEFT",
+    1: "TOP",
+    2: "TOP_RIGHT",
+    3: "LEFT",
+    4: "CENTER",
+    5: "RIGHT",
+    6: "BOTTOM_LEFT",
+    7: "BOTTOM",
+    8: "BOTTOM_RIGHT",
+  }
+
+  def ANCHOR_Name(cls, x): return cls._ANCHOR_NAMES.get(x, "")
+  ANCHOR_Name = classmethod(ANCHOR_Name)
+
+  has_source_index_ = 0
+  source_index_ = 0
+  has_x_offset_ = 0
+  x_offset_ = 0
+  has_y_offset_ = 0
+  y_offset_ = 0
+  has_opacity_ = 0
+  opacity_ = 0.0
+  has_anchor_ = 0
+  anchor_ = 0
+
+  def __init__(self, contents=None):
+    if contents is not None: self.MergeFromString(contents)
+
+  def source_index(self): return self.source_index_
+
+  def set_source_index(self, x):
+    self.has_source_index_ = 1
+    self.source_index_ = x
+
+  def clear_source_index(self):
+    if self.has_source_index_:
+      self.has_source_index_ = 0
+      self.source_index_ = 0
+
+  def has_source_index(self): return self.has_source_index_
+
+  def x_offset(self): return self.x_offset_
+
+  def set_x_offset(self, x):
+    self.has_x_offset_ = 1
+    self.x_offset_ = x
+
+  def clear_x_offset(self):
+    if self.has_x_offset_:
+      self.has_x_offset_ = 0
+      self.x_offset_ = 0
+
+  def has_x_offset(self): return self.has_x_offset_
+
+  def y_offset(self): return self.y_offset_
+
+  def set_y_offset(self, x):
+    self.has_y_offset_ = 1
+    self.y_offset_ = x
+
+  def clear_y_offset(self):
+    if self.has_y_offset_:
+      self.has_y_offset_ = 0
+      self.y_offset_ = 0
+
+  def has_y_offset(self): return self.has_y_offset_
+
+  def opacity(self): return self.opacity_
+
+  def set_opacity(self, x):
+    self.has_opacity_ = 1
+    self.opacity_ = x
+
+  def clear_opacity(self):
+    if self.has_opacity_:
+      self.has_opacity_ = 0
+      self.opacity_ = 0.0
+
+  def has_opacity(self): return self.has_opacity_
+
+  def anchor(self): return self.anchor_
+
+  def set_anchor(self, x):
+    self.has_anchor_ = 1
+    self.anchor_ = x
+
+  def clear_anchor(self):
+    if self.has_anchor_:
+      self.has_anchor_ = 0
+      self.anchor_ = 0
+
+  def has_anchor(self): return self.has_anchor_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_source_index()): self.set_source_index(x.source_index())
+    if (x.has_x_offset()): self.set_x_offset(x.x_offset())
+    if (x.has_y_offset()): self.set_y_offset(x.y_offset())
+    if (x.has_opacity()): self.set_opacity(x.opacity())
+    if (x.has_anchor()): self.set_anchor(x.anchor())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_source_index_ != x.has_source_index_: return 0
+    if self.has_source_index_ and self.source_index_ != x.source_index_: return 0
+    if self.has_x_offset_ != x.has_x_offset_: return 0
+    if self.has_x_offset_ and self.x_offset_ != x.x_offset_: return 0
+    if self.has_y_offset_ != x.has_y_offset_: return 0
+    if self.has_y_offset_ and self.y_offset_ != x.y_offset_: return 0
+    if self.has_opacity_ != x.has_opacity_: return 0
+    if self.has_opacity_ and self.opacity_ != x.opacity_: return 0
+    if self.has_anchor_ != x.has_anchor_: return 0
+    if self.has_anchor_ and self.anchor_ != x.anchor_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_source_index_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: source_index not set.')
+    if (not self.has_x_offset_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: x_offset not set.')
+    if (not self.has_y_offset_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: y_offset not set.')
+    if (not self.has_opacity_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: opacity not set.')
+    if (not self.has_anchor_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: anchor not set.')
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthVarInt64(self.source_index_)
+    n += self.lengthVarInt64(self.x_offset_)
+    n += self.lengthVarInt64(self.y_offset_)
+    n += self.lengthVarInt64(self.anchor_)
+    return n + 9
+
+  def Clear(self):
+    self.clear_source_index()
+    self.clear_x_offset()
+    self.clear_y_offset()
+    self.clear_opacity()
+    self.clear_anchor()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(8)
+    out.putVarInt32(self.source_index_)
+    out.putVarInt32(16)
+    out.putVarInt32(self.x_offset_)
+    out.putVarInt32(24)
+    out.putVarInt32(self.y_offset_)
+    out.putVarInt32(37)
+    out.putFloat(self.opacity_)
+    out.putVarInt32(40)
+    out.putVarInt32(self.anchor_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 8:
+        self.set_source_index(d.getVarInt32())
+        continue
+      if tt == 16:
+        self.set_x_offset(d.getVarInt32())
+        continue
+      if tt == 24:
+        self.set_y_offset(d.getVarInt32())
+        continue
+      if tt == 37:
+        self.set_opacity(d.getFloat())
+        continue
+      if tt == 40:
+        self.set_anchor(d.getVarInt32())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_source_index_: res+=prefix+("source_index: %s\n" % self.DebugFormatInt32(self.source_index_))
+    if self.has_x_offset_: res+=prefix+("x_offset: %s\n" % self.DebugFormatInt32(self.x_offset_))
+    if self.has_y_offset_: res+=prefix+("y_offset: %s\n" % self.DebugFormatInt32(self.y_offset_))
+    if self.has_opacity_: res+=prefix+("opacity: %s\n" % self.DebugFormatFloat(self.opacity_))
+    if self.has_anchor_: res+=prefix+("anchor: %s\n" % self.DebugFormatInt32(self.anchor_))
+    return res
+
+  ksource_index = 1
+  kx_offset = 2
+  ky_offset = 3
+  kopacity = 4
+  kanchor = 5
+
+  _TEXT = (
+   "ErrorCode",
+   "source_index",
+   "x_offset",
+   "y_offset",
+   "opacity",
+   "anchor",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.FLOAT,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ImagesCanvas(ProtocolBuffer.ProtocolMessage):
+  has_width_ = 0
+  width_ = 0
+  has_height_ = 0
+  height_ = 0
+  has_output_ = 0
+  has_color_ = 0
+  color_ = -1
+
+  def __init__(self, contents=None):
+    self.output_ = OutputSettings()
+    if contents is not None: self.MergeFromString(contents)
+
+  def width(self): return self.width_
+
+  def set_width(self, x):
+    self.has_width_ = 1
+    self.width_ = x
+
+  def clear_width(self):
+    if self.has_width_:
+      self.has_width_ = 0
+      self.width_ = 0
+
+  def has_width(self): return self.has_width_
+
+  def height(self): return self.height_
+
+  def set_height(self, x):
+    self.has_height_ = 1
+    self.height_ = x
+
+  def clear_height(self):
+    if self.has_height_:
+      self.has_height_ = 0
+      self.height_ = 0
+
+  def has_height(self): return self.has_height_
+
+  def output(self): return self.output_
+
+  def mutable_output(self): self.has_output_ = 1; return self.output_
+
+  def clear_output(self):self.has_output_ = 0; self.output_.Clear()
+
+  def has_output(self): return self.has_output_
+
+  def color(self): return self.color_
+
+  def set_color(self, x):
+    self.has_color_ = 1
+    self.color_ = x
+
+  def clear_color(self):
+    if self.has_color_:
+      self.has_color_ = 0
+      self.color_ = -1
+
+  def has_color(self): return self.has_color_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_width()): self.set_width(x.width())
+    if (x.has_height()): self.set_height(x.height())
+    if (x.has_output()): self.mutable_output().MergeFrom(x.output())
+    if (x.has_color()): self.set_color(x.color())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_width_ != x.has_width_: return 0
+    if self.has_width_ and self.width_ != x.width_: return 0
+    if self.has_height_ != x.has_height_: return 0
+    if self.has_height_ and self.height_ != x.height_: return 0
+    if self.has_output_ != x.has_output_: return 0
+    if self.has_output_ and self.output_ != x.output_: return 0
+    if self.has_color_ != x.has_color_: return 0
+    if self.has_color_ and self.color_ != x.color_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_width_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: width not set.')
+    if (not self.has_height_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: height not set.')
+    if (not self.has_output_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: output not set.')
+    elif not self.output_.IsInitialized(debug_strs): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthVarInt64(self.width_)
+    n += self.lengthVarInt64(self.height_)
+    n += self.lengthString(self.output_.ByteSize())
+    if (self.has_color_): n += 1 + self.lengthVarInt64(self.color_)
+    return n + 3
+
+  def Clear(self):
+    self.clear_width()
+    self.clear_height()
+    self.clear_output()
+    self.clear_color()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(8)
+    out.putVarInt32(self.width_)
+    out.putVarInt32(16)
+    out.putVarInt32(self.height_)
+    out.putVarInt32(26)
+    out.putVarInt32(self.output_.ByteSize())
+    self.output_.OutputUnchecked(out)
+    if (self.has_color_):
+      out.putVarInt32(32)
+      out.putVarInt32(self.color_)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 8:
+        self.set_width(d.getVarInt32())
+        continue
+      if tt == 16:
+        self.set_height(d.getVarInt32())
+        continue
+      if tt == 26:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_output().TryMerge(tmp)
+        continue
+      if tt == 32:
+        self.set_color(d.getVarInt32())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_width_: res+=prefix+("width: %s\n" % self.DebugFormatInt32(self.width_))
+    if self.has_height_: res+=prefix+("height: %s\n" % self.DebugFormatInt32(self.height_))
+    if self.has_output_:
+      res+=prefix+"output <\n"
+      res+=self.output_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    if self.has_color_: res+=prefix+("color: %s\n" % self.DebugFormatInt32(self.color_))
+    return res
+
+  kwidth = 1
+  kheight = 2
+  koutput = 3
+  kcolor = 4
+
+  _TEXT = (
+   "ErrorCode",
+   "width",
+   "height",
+   "output",
+   "color",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ImagesCompositeRequest(ProtocolBuffer.ProtocolMessage):
+  has_canvas_ = 0
+
+  def __init__(self, contents=None):
+    self.image_ = []
+    self.options_ = []
+    self.canvas_ = ImagesCanvas()
+    if contents is not None: self.MergeFromString(contents)
+
+  def image_size(self): return len(self.image_)
+  def image_list(self): return self.image_
+
+  def image(self, i):
+    return self.image_[i]
+
+  def mutable_image(self, i):
+    return self.image_[i]
+
+  def add_image(self):
+    x = ImageData()
+    self.image_.append(x)
+    return x
+
+  def clear_image(self):
+    self.image_ = []
+  def options_size(self): return len(self.options_)
+  def options_list(self): return self.options_
+
+  def options(self, i):
+    return self.options_[i]
+
+  def mutable_options(self, i):
+    return self.options_[i]
+
+  def add_options(self):
+    x = CompositeImageOptions()
+    self.options_.append(x)
+    return x
+
+  def clear_options(self):
+    self.options_ = []
+  def canvas(self): return self.canvas_
+
+  def mutable_canvas(self): self.has_canvas_ = 1; return self.canvas_
+
+  def clear_canvas(self):self.has_canvas_ = 0; self.canvas_.Clear()
+
+  def has_canvas(self): return self.has_canvas_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.image_size()): self.add_image().CopyFrom(x.image(i))
+    for i in xrange(x.options_size()): self.add_options().CopyFrom(x.options(i))
+    if (x.has_canvas()): self.mutable_canvas().MergeFrom(x.canvas())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.image_) != len(x.image_): return 0
+    for e1, e2 in zip(self.image_, x.image_):
+      if e1 != e2: return 0
+    if len(self.options_) != len(x.options_): return 0
+    for e1, e2 in zip(self.options_, x.options_):
+      if e1 != e2: return 0
+    if self.has_canvas_ != x.has_canvas_: return 0
+    if self.has_canvas_ and self.canvas_ != x.canvas_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    for p in self.image_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    for p in self.options_:
+      if not p.IsInitialized(debug_strs): initialized=0
+    if (not self.has_canvas_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: canvas not set.')
+    elif not self.canvas_.IsInitialized(debug_strs): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 1 * len(self.image_)
+    for i in xrange(len(self.image_)): n += self.lengthString(self.image_[i].ByteSize())
+    n += 1 * len(self.options_)
+    for i in xrange(len(self.options_)): n += self.lengthString(self.options_[i].ByteSize())
+    n += self.lengthString(self.canvas_.ByteSize())
+    return n + 1
+
+  def Clear(self):
+    self.clear_image()
+    self.clear_options()
+    self.clear_canvas()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.image_)):
+      out.putVarInt32(10)
+      out.putVarInt32(self.image_[i].ByteSize())
+      self.image_[i].OutputUnchecked(out)
+    for i in xrange(len(self.options_)):
+      out.putVarInt32(18)
+      out.putVarInt32(self.options_[i].ByteSize())
+      self.options_[i].OutputUnchecked(out)
+    out.putVarInt32(26)
+    out.putVarInt32(self.canvas_.ByteSize())
+    self.canvas_.OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.add_image().TryMerge(tmp)
+        continue
+      if tt == 18:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.add_options().TryMerge(tmp)
+        continue
+      if tt == 26:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_canvas().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.image_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("image%s <\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+      cnt+=1
+    cnt=0
+    for e in self.options_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("options%s <\n" % elm)
+      res+=e.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+      cnt+=1
+    if self.has_canvas_:
+      res+=prefix+"canvas <\n"
+      res+=self.canvas_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+  kimage = 1
+  koptions = 2
+  kcanvas = 3
+
+  _TEXT = (
+   "ErrorCode",
+   "image",
+   "options",
+   "canvas",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ImagesCompositeResponse(ProtocolBuffer.ProtocolMessage):
+  has_image_ = 0
+
+  def __init__(self, contents=None):
+    self.image_ = ImageData()
+    if contents is not None: self.MergeFromString(contents)
+
+  def image(self): return self.image_
+
+  def mutable_image(self): self.has_image_ = 1; return self.image_
+
+  def clear_image(self):self.has_image_ = 0; self.image_.Clear()
+
+  def has_image(self): return self.has_image_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_image()): self.mutable_image().MergeFrom(x.image())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_image_ != x.has_image_: return 0
+    if self.has_image_ and self.image_ != x.image_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_image_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: image not set.')
+    elif not self.image_.IsInitialized(debug_strs): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(self.image_.ByteSize())
+    return n + 1
+
+  def Clear(self):
+    self.clear_image()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putVarInt32(self.image_.ByteSize())
+    self.image_.OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_image().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_image_:
+      res+=prefix+"image <\n"
+      res+=self.image_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+  kimage = 1
+
+  _TEXT = (
+   "ErrorCode",
+   "image",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ImagesHistogramRequest(ProtocolBuffer.ProtocolMessage):
+  has_image_ = 0
+
+  def __init__(self, contents=None):
+    self.image_ = ImageData()
+    if contents is not None: self.MergeFromString(contents)
+
+  def image(self): return self.image_
+
+  def mutable_image(self): self.has_image_ = 1; return self.image_
+
+  def clear_image(self):self.has_image_ = 0; self.image_.Clear()
+
+  def has_image(self): return self.has_image_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_image()): self.mutable_image().MergeFrom(x.image())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_image_ != x.has_image_: return 0
+    if self.has_image_ and self.image_ != x.image_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_image_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: image not set.')
+    elif not self.image_.IsInitialized(debug_strs): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(self.image_.ByteSize())
+    return n + 1
+
+  def Clear(self):
+    self.clear_image()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putVarInt32(self.image_.ByteSize())
+    self.image_.OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_image().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_image_:
+      res+=prefix+"image <\n"
+      res+=self.image_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+  kimage = 1
+
+  _TEXT = (
+   "ErrorCode",
+   "image",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ImagesHistogram(ProtocolBuffer.ProtocolMessage):
+
+  def __init__(self, contents=None):
+    self.red_ = []
+    self.green_ = []
+    self.blue_ = []
+    if contents is not None: self.MergeFromString(contents)
+
+  def red_size(self): return len(self.red_)
+  def red_list(self): return self.red_
+
+  def red(self, i):
+    return self.red_[i]
+
+  def set_red(self, i, x):
+    self.red_[i] = x
+
+  def add_red(self, x):
+    self.red_.append(x)
+
+  def clear_red(self):
+    self.red_ = []
+
+  def green_size(self): return len(self.green_)
+  def green_list(self): return self.green_
+
+  def green(self, i):
+    return self.green_[i]
+
+  def set_green(self, i, x):
+    self.green_[i] = x
+
+  def add_green(self, x):
+    self.green_.append(x)
+
+  def clear_green(self):
+    self.green_ = []
+
+  def blue_size(self): return len(self.blue_)
+  def blue_list(self): return self.blue_
+
+  def blue(self, i):
+    return self.blue_[i]
+
+  def set_blue(self, i, x):
+    self.blue_[i] = x
+
+  def add_blue(self, x):
+    self.blue_.append(x)
+
+  def clear_blue(self):
+    self.blue_ = []
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    for i in xrange(x.red_size()): self.add_red(x.red(i))
+    for i in xrange(x.green_size()): self.add_green(x.green(i))
+    for i in xrange(x.blue_size()): self.add_blue(x.blue(i))
+
+  def Equals(self, x):
+    if x is self: return 1
+    if len(self.red_) != len(x.red_): return 0
+    for e1, e2 in zip(self.red_, x.red_):
+      if e1 != e2: return 0
+    if len(self.green_) != len(x.green_): return 0
+    for e1, e2 in zip(self.green_, x.green_):
+      if e1 != e2: return 0
+    if len(self.blue_) != len(x.blue_): return 0
+    for e1, e2 in zip(self.blue_, x.blue_):
+      if e1 != e2: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += 1 * len(self.red_)
+    for i in xrange(len(self.red_)): n += self.lengthVarInt64(self.red_[i])
+    n += 1 * len(self.green_)
+    for i in xrange(len(self.green_)): n += self.lengthVarInt64(self.green_[i])
+    n += 1 * len(self.blue_)
+    for i in xrange(len(self.blue_)): n += self.lengthVarInt64(self.blue_[i])
+    return n + 0
+
+  def Clear(self):
+    self.clear_red()
+    self.clear_green()
+    self.clear_blue()
+
+  def OutputUnchecked(self, out):
+    for i in xrange(len(self.red_)):
+      out.putVarInt32(8)
+      out.putVarInt32(self.red_[i])
+    for i in xrange(len(self.green_)):
+      out.putVarInt32(16)
+      out.putVarInt32(self.green_[i])
+    for i in xrange(len(self.blue_)):
+      out.putVarInt32(24)
+      out.putVarInt32(self.blue_[i])
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 8:
+        self.add_red(d.getVarInt32())
+        continue
+      if tt == 16:
+        self.add_green(d.getVarInt32())
+        continue
+      if tt == 24:
+        self.add_blue(d.getVarInt32())
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    cnt=0
+    for e in self.red_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("red%s: %s\n" % (elm, self.DebugFormatInt32(e)))
+      cnt+=1
+    cnt=0
+    for e in self.green_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("green%s: %s\n" % (elm, self.DebugFormatInt32(e)))
+      cnt+=1
+    cnt=0
+    for e in self.blue_:
+      elm=""
+      if printElemNumber: elm="(%d)" % cnt
+      res+=prefix+("blue%s: %s\n" % (elm, self.DebugFormatInt32(e)))
+      cnt+=1
+    return res
+
+  kred = 1
+  kgreen = 2
+  kblue = 3
+
+  _TEXT = (
+   "ErrorCode",
+   "red",
+   "green",
+   "blue",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+class ImagesHistogramResponse(ProtocolBuffer.ProtocolMessage):
+  has_histogram_ = 0
+
+  def __init__(self, contents=None):
+    self.histogram_ = ImagesHistogram()
+    if contents is not None: self.MergeFromString(contents)
+
+  def histogram(self): return self.histogram_
+
+  def mutable_histogram(self): self.has_histogram_ = 1; return self.histogram_
+
+  def clear_histogram(self):self.has_histogram_ = 0; self.histogram_.Clear()
+
+  def has_histogram(self): return self.has_histogram_
+
+
+  def MergeFrom(self, x):
+    assert x is not self
+    if (x.has_histogram()): self.mutable_histogram().MergeFrom(x.histogram())
+
+  def Equals(self, x):
+    if x is self: return 1
+    if self.has_histogram_ != x.has_histogram_: return 0
+    if self.has_histogram_ and self.histogram_ != x.histogram_: return 0
+    return 1
+
+  def IsInitialized(self, debug_strs=None):
+    initialized = 1
+    if (not self.has_histogram_):
+      initialized = 0
+      if debug_strs is not None:
+        debug_strs.append('Required field: histogram not set.')
+    elif not self.histogram_.IsInitialized(debug_strs): initialized = 0
+    return initialized
+
+  def ByteSize(self):
+    n = 0
+    n += self.lengthString(self.histogram_.ByteSize())
+    return n + 1
+
+  def Clear(self):
+    self.clear_histogram()
+
+  def OutputUnchecked(self, out):
+    out.putVarInt32(10)
+    out.putVarInt32(self.histogram_.ByteSize())
+    self.histogram_.OutputUnchecked(out)
+
+  def TryMerge(self, d):
+    while d.avail() > 0:
+      tt = d.getVarInt32()
+      if tt == 10:
+        length = d.getVarInt32()
+        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
+        d.skip(length)
+        self.mutable_histogram().TryMerge(tmp)
+        continue
+      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
+      d.skipData(tt)
+
+
+  def __str__(self, prefix="", printElemNumber=0):
+    res=""
+    if self.has_histogram_:
+      res+=prefix+"histogram <\n"
+      res+=self.histogram_.__str__(prefix + "  ", printElemNumber)
+      res+=prefix+">\n"
+    return res
+
+  khistogram = 1
+
+  _TEXT = (
+   "ErrorCode",
+   "histogram",
+  )
+
+  _TYPES = (
+   ProtocolBuffer.Encoder.NUMERIC,
+   ProtocolBuffer.Encoder.STRING,
+
+  )
+
+  _STYLE = """"""
+  _STYLE_CONTENT_TYPE = """"""
+
+__all__ = ['ImagesServiceError','ImagesServiceTransform','Transform','ImageData','OutputSettings','ImagesTransformRequest','ImagesTransformResponse','CompositeImageOptions','ImagesCanvas','ImagesCompositeRequest','ImagesCompositeResponse','ImagesHistogramRequest','ImagesHistogram','ImagesHistogramResponse']
--- a/thirdparty/google_appengine/google/appengine/api/images/images_stub.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/images/images_stub.py	Fri Apr 24 14:16:00 2009 +0000
@@ -48,6 +48,92 @@
     super(ImagesServiceStub, self).__init__(service_name)
     Image.init()
 
+  def _Dynamic_Composite(self, request, response):
+    """Implementation of ImagesService::Composite.
+
+    Based off documentation of the PIL library at
+    http://www.pythonware.com/library/pil/handbook/index.htm
+
+    Args:
+      request: ImagesCompositeRequest, contains image request info.
+      response: ImagesCompositeResponse, contains transformed image.
+    """
+    width = request.canvas().width()
+    height = request.canvas().height()
+    color = request.canvas().color() % 0x100000000
+    reordered_color = int((color & 0xff000000) | ((color >> 16) & 0xff) |
+                          (color & 0xff00) | (color & 0xff) << 16)
+    canvas = Image.new("RGBA", (width, height), reordered_color)
+    sources = []
+    if (not request.canvas().width() or request.canvas().width() > 4000 or
+        not request.canvas().height() or request.canvas().height() > 4000):
+      raise apiproxy_errors.ApplicationError(
+          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
+    if not request.image_size():
+      raise apiproxy_errors.ApplicationError(
+          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
+    if not request.options_size():
+      raise apiproxy_errors.ApplicationError(
+          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
+    if request.options_size() > images.MAX_COMPOSITES_PER_REQUEST:
+      raise apiproxy_errors.ApplicationError(
+          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
+    for image in request.image_list():
+      sources.append(self._OpenImage(image.content()))
+
+    for options in request.options_list():
+      if (options.anchor() < images.TOP_LEFT or
+          options.anchor() > images.BOTTOM_RIGHT):
+        raise apiproxy_errors.ApplicationError(
+            images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
+      if options.source_index() >= len(sources) or options.source_index() < 0:
+        raise apiproxy_errors.ApplicationError(
+            images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
+      if options.opacity() < 0 or options.opacity() > 1:
+        raise apiproxy_errors.ApplicationError(
+            images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
+      source = sources[options.source_index()]
+      x_anchor = (options.anchor() % 3) * 0.5
+      y_anchor = (options.anchor() / 3) * 0.5
+      x_offset = int(options.x_offset() + x_anchor * (width - source.size[0]))
+      y_offset = int(options.y_offset() + y_anchor * (height - source.size[1]))
+      alpha = options.opacity() * 255
+      mask = Image.new("L", source.size, alpha)
+      canvas.paste(source, (x_offset, y_offset), mask)
+    response_value = self._EncodeImage(canvas, request.canvas().output())
+    response.mutable_image().set_content(response_value)
+
+  def _Dynamic_Histogram(self, request, response):
+    """Trivial implementation of ImagesService::Histogram.
+
+    Based on the documentation of the PIL library at
+    http://www.pythonware.com/library/pil/handbook/index.htm
+
+    Args:
+      request: ImagesHistogramRequest, contains the image.
+      response: ImagesHistogramResponse, contains histogram of the image.
+    """
+    image = self._OpenImage(request.image().content())
+    img_format = image.format
+    if img_format not in ("BMP", "GIF", "ICO", "JPEG", "PNG", "TIFF"):
+      raise apiproxy_errors.ApplicationError(
+          images_service_pb.ImagesServiceError.NOT_IMAGE)
+    image = image.convert("RGBA")
+    red = [0] * 256
+    green = [0] * 256
+    blue = [0] * 256
+    for pixel in image.getdata():
+      red[int((pixel[0] * pixel[3]) / 255)] += 1
+      green[int((pixel[1] * pixel[3]) / 255)] += 1
+      blue[int((pixel[2] * pixel[3]) / 255)] += 1
+    histogram = response.mutable_histogram()
+    for value in red:
+      histogram.add_red(value)
+    for value in green:
+      histogram.add_green(value)
+    for value in blue:
+      histogram.add_blue(value)
+
   def _Dynamic_Transform(self, request, response):
     """Trivial implementation of ImagesService::Transform.
 
@@ -58,22 +144,7 @@
       request: ImagesTransformRequest, contains image request info.
       response: ImagesTransformResponse, contains transformed image.
     """
-    image = request.image().content()
-    if not image:
-      raise apiproxy_errors.ApplicationError(
-          images_service_pb.ImagesServiceError.NOT_IMAGE)
-
-    image = StringIO.StringIO(image)
-    try:
-      original_image = Image.open(image)
-    except IOError:
-      raise apiproxy_errors.ApplicationError(
-          images_service_pb.ImagesServiceError.BAD_IMAGE_DATA)
-
-    img_format = original_image.format
-    if img_format not in ("BMP", "GIF", "ICO", "JPEG", "PNG", "TIFF"):
-      raise apiproxy_errors.ApplicationError(
-          images_service_pb.ImagesServiceError.NOT_IMAGE)
+    original_image = self._OpenImage(request.image().content())
 
     new_image = self._ProcessTransforms(original_image,
                                         request.transform_list())
@@ -104,6 +175,36 @@
 
     return image_string.getvalue()
 
+  def _OpenImage(self, image):
+    """Opens an image provided as a string.
+
+    Args:
+      image: image data to be opened
+
+    Raises:
+      apiproxy_errors.ApplicationError if the image cannot be opened or if it
+      is in an unsupported format.
+
+    Returns:
+      Image containing the image data passed in.
+    """
+    if not image:
+      raise apiproxy_errors.ApplicationError(
+          images_service_pb.ImagesServiceError.NOT_IMAGE)
+
+    image = StringIO.StringIO(image)
+    try:
+      image = Image.open(image)
+    except IOError:
+      raise apiproxy_errors.ApplicationError(
+          images_service_pb.ImagesServiceError.BAD_IMAGE_DATA)
+
+    img_format = image.format
+    if img_format not in ("BMP", "GIF", "ICO", "JPEG", "PNG", "TIFF"):
+      raise apiproxy_errors.ApplicationError(
+          images_service_pb.ImagesServiceError.NOT_IMAGE)
+    return image
+
   def _ValidateCropArg(self, arg):
     """Check an argument for the Crop transform.
 
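
The following sketch (not part of the changeset) shows how the new Composite
and Histogram stub methods are reached through the public images API; it
assumes composite() takes (image_data, x_offset, y_offset, opacity, anchor)
tuples and histogram() returns three 256-bucket lists, matching the request
and response shapes handled by the stub above.

  from google.appengine.api import images

  def brand_photo(photo_data, logo_data):
    # 400x300 opaque white canvas (32-bit ARGB colour), photo centred,
    # logo pinned 10px in from the bottom-right corner at 50% opacity.
    inputs = [
        (photo_data, 0, 0, 1.0, images.CENTER_CENTER),
        (logo_data, -10, -10, 0.5, images.BOTTOM_RIGHT),
    ]
    branded = images.composite(inputs, 400, 300, color=0xffffffff,
                               output_encoding=images.PNG)
    red, green, blue = images.histogram(branded)  # three 256-bucket lists
    return branded, (red, green, blue)
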
--- a/thirdparty/google_appengine/google/appengine/api/mail.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/mail.py	Fri Apr 24 14:16:00 2009 +0000
@@ -28,7 +28,6 @@
 from email import MIMEBase
 from email import MIMEMultipart
 from email import MIMEText
-import mimetypes
 import types
 
 from google.appengine.api import api_base_pb
@@ -51,20 +50,32 @@
 }
 
 
-EXTENSION_WHITELIST = set([
-  'bmp',
-  'css',
-  'csv',
-  'gif',
-  'html', 'htm',
-  'jpeg', 'jpg', 'jpe',
-  'pdf',
-  'png',
-  'rss',
-  'text', 'txt', 'asc', 'diff', 'pot',
-  'tiff', 'tif',
-  'wbmp',
-])
+EXTENSION_MIME_MAP = {
+  'asc': 'text/plain',
+  'bmp': 'image/x-ms-bmp',
+  'css': 'text/css',
+  'csv': 'text/csv',
+  'diff': 'text/plain',
+  'gif': 'image/gif',
+  'htm': 'text/html',
+  'html': 'text/html',
+  'ics': 'text/calendar',
+  'jpe': 'image/jpeg',
+  'jpeg': 'image/jpeg',
+  'jpg': 'image/jpeg',
+  'pdf': 'application/pdf',
+  'png': 'image/png',
+  'pot': 'text/plain',
+  'rss': 'text/rss+xml',
+  'text': 'text/plain',
+  'tif': 'image/tiff',
+  'tiff': 'image/tiff',
+  'txt': 'text/plain',
+  'vcf': 'text/directory',
+  'wbmp': 'image/vnd.wap.wbmp',
+}
+
+EXTENSION_WHITELIST = frozenset(EXTENSION_MIME_MAP.iterkeys())
 
 
 def invalid_email_reason(email_address, field):
@@ -234,6 +245,35 @@
 SendMailToAdmins = send_mail_to_admins
 
 
+def _GetMimeType(file_name):
+  """Determine mime-type from file name.
+
+  Parses file name and determines mime-type based on extension map.
+
+  This method is not part of the public API and should not be used by
+  applications.
+
+  Args:
+    file_name: File to determine extension for.
+
+  Returns:
+    Mime-type associated with file extension.
+
+  Raises:
+    InvalidAttachmentTypeError when the file name has no supported extension.
+  """
+  extension_index = file_name.rfind('.')
+  if extension_index == -1:
+    raise InvalidAttachmentTypeError(
+        "File '%s' does not have an extension" % file_name)
+  extension = file_name[extension_index + 1:]
+  mime_type = EXTENSION_MIME_MAP.get(extension, None)
+  if mime_type is None:
+    raise InvalidAttachmentTypeError(
+        "Extension '%s' is not supported." % extension)
+  return mime_type
+
+
 def mail_message_to_mime_message(protocol_message):
   """Generate a MIMEMultitype message from protocol buffer.
 
@@ -249,6 +289,9 @@
 
   Returns:
     MIMEMultitype representing the provided MailMessage.
+
+  Raises:
+    InvalidAttachmentTypeError when an attachment has no supported extension.
   """
   parts = []
   if protocol_message.has_textbody():
@@ -264,14 +307,13 @@
 
   result = MIMEMultipart.MIMEMultipart(_subparts=payload)
   for attachment in protocol_message.attachment_list():
-    mime_type, encoding = mimetypes.guess_type(attachment.filename())
-    assert mime_type is not None
+    file_name = attachment.filename()
+    mime_type = _GetMimeType(file_name)
     maintype, subtype = mime_type.split('/')
     mime_attachment = MIMEBase.MIMEBase(maintype, subtype)
     mime_attachment.add_header('Content-Disposition',
                                'attachment',
                                filename=attachment.filename())
-    mime_attachment.set_charset(encoding)
     mime_attachment.set_payload(attachment.data())
     result.attach(mime_attachment)
 
@@ -283,7 +325,7 @@
     result['Bcc'] = ', '.join(protocol_message.bcc_list())
 
   result['From'] = protocol_message.sender()
-  result['ReplyTo'] = protocol_message.replyto()
+  result['Reply-To'] = protocol_message.replyto()
   result['Subject'] = protocol_message.subject()
 
   return result
@@ -376,15 +418,8 @@
     if not hasattr(self, 'body') and not hasattr(self, 'html'):
       raise MissingBodyError()
     if hasattr(self, 'attachments'):
-      for filename, data in _attachment_sequence(self.attachments):
-        split_filename = filename.split('.')
-        if len(split_filename) < 2:
-          raise InvalidAttachmentTypeError()
-        if split_filename[-1] not in EXTENSION_WHITELIST:
-          raise InvalidAttachmentTypeError()
-        mime_type, encoding = mimetypes.guess_type(filename)
-        if mime_type is None:
-          raise InvalidAttachmentTypeError()
+      for file_name, data in _attachment_sequence(self.attachments):
+        _GetMimeType(file_name)
 
   def CheckInitialized(self):
     self.check_initialized()
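
A hedged usage sketch (not part of the changeset): attachment validation and
MIME typing now both key off EXTENSION_MIME_MAP, so mimetypes.guess_type() is
no longer consulted. Here ics_bytes is a placeholder for the attachment
payload.

  from google.appengine.api import mail

  message = mail.EmailMessage(
      sender='admin@example.com',
      to='someone@example.com',
      subject='Calendar invite',
      body='See the attached .ics file.',
      attachments=[('meeting.ics', ics_bytes)])  # 'ics' is newly whitelisted
  # check_initialized() would raise InvalidAttachmentTypeError if, say,
  # 'report.exe' were attached instead.
  message.check_initialized()
  message.send()
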
--- a/thirdparty/google_appengine/google/appengine/api/memcache/__init__.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/memcache/__init__.py	Fri Apr 24 14:16:00 2009 +0000
@@ -31,6 +31,7 @@
 
 from google.appengine.api import api_base_pb
 from google.appengine.api import apiproxy_stub_map
+from google.appengine.api import namespace_manager
 from google.appengine.api.memcache import memcache_service_pb
 from google.appengine.runtime import apiproxy_errors
 
@@ -371,7 +372,7 @@
       return False
     return True
 
-  def get(self, key):
+  def get(self, key, namespace=None):
     """Looks up a single key in memcache.
 
     If you have multiple items to load, though, it's much more efficient
@@ -382,12 +383,15 @@
     Args:
       key: The key in memcache to look up.  See docs on Client
         for details of format.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       The value of the key, if found in memcache, else None.
     """
     request = MemcacheGetRequest()
     request.add_key(_key_string(key))
+    namespace_manager._add_name_space(request, namespace)
     response = MemcacheGetResponse()
     try:
       self._make_sync_call('memcache', 'Get', request, response)
@@ -401,7 +405,7 @@
                          response.item(0).flags(),
                          self._do_unpickle)
 
-  def get_multi(self, keys, key_prefix=''):
+  def get_multi(self, keys, key_prefix='', namespace=None):
     """Looks up multiple keys from memcache in one operation.
 
     This is the recommended way to do bulk loads.
@@ -414,6 +418,8 @@
         and not in any particular encoding.
       key_prefix: Prefix to prepend to all keys when talking to the server;
         not included in the returned dictionary.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       A dictionary of the keys and values that were present in memcache.
@@ -421,6 +427,7 @@
       the keys in the returned dictionary.
     """
     request = MemcacheGetRequest()
+    namespace_manager._add_name_space(request, namespace)
     response = MemcacheGetResponse()
     user_key = {}
     for key in keys:
@@ -437,7 +444,7 @@
       return_value[user_key[returned_item.key()]] = value
     return return_value
 
-  def delete(self, key, seconds=0):
+  def delete(self, key, seconds=0, namespace=None):
     """Deletes a key from memcache.
 
     Args:
@@ -448,6 +455,8 @@
         items can be immediately added.  With or without this option,
         a 'set' operation will always work.  Float values will be rounded up to
         the nearest whole second.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       DELETE_NETWORK_FAILURE (0) on network failure,
@@ -463,6 +472,7 @@
       raise ValueError('Delete timeout must be non-negative.')
 
     request = MemcacheDeleteRequest()
+    namespace_manager._add_name_space(request, namespace)
     response = MemcacheDeleteResponse()
 
     delete_item = request.add_item()
@@ -480,7 +490,7 @@
       return DELETE_ITEM_MISSING
     assert False, 'Unexpected deletion status code.'
 
-  def delete_multi(self, keys, seconds=0, key_prefix=''):
+  def delete_multi(self, keys, seconds=0, key_prefix='', namespace=None):
     """Delete multiple keys at once.
 
     Args:
@@ -493,6 +503,8 @@
         the nearest whole second.
       key_prefix: Prefix to put on all keys when sending specified
         keys to memcache.  See docs for get_multi() and set_multi().
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       True if all operations completed successfully.  False if one
@@ -504,6 +516,7 @@
       raise ValueError('Delete timeout must not be negative.')
 
     request = MemcacheDeleteRequest()
+    namespace_manager._add_name_space(request, namespace)
     response = MemcacheDeleteResponse()
 
     for key in keys:
@@ -516,7 +529,7 @@
       return False
     return True
 
-  def set(self, key, value, time=0, min_compress_len=0):
+  def set(self, key, value, time=0, min_compress_len=0, namespace=None):
     """Sets a key's value, regardless of previous contents in cache.
 
     Unlike add() and replace(), this method always sets (or
@@ -532,13 +545,16 @@
         memory pressure.  Float values will be rounded up to the nearest
         whole second.
       min_compress_len: Ignored option for compatibility.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       True if set.  False on error.
     """
-    return self._set_with_policy(MemcacheSetRequest.SET, key, value, time=time)
+    return self._set_with_policy(MemcacheSetRequest.SET, key, value, time=time,
+                                 namespace=namespace)
 
-  def add(self, key, value, time=0, min_compress_len=0):
+  def add(self, key, value, time=0, min_compress_len=0, namespace=None):
     """Sets a key's value, iff item is not already in memcache.
 
     Args:
@@ -550,13 +566,16 @@
         memory pressure.  Float values will be rounded up to the nearest
         whole second.
       min_compress_len: Ignored option for compatibility.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       True if added.  False on error.
     """
-    return self._set_with_policy(MemcacheSetRequest.ADD, key, value, time=time)
+    return self._set_with_policy(MemcacheSetRequest.ADD, key, value, time=time,
+                                 namespace=namespace)
 
-  def replace(self, key, value, time=0, min_compress_len=0):
+  def replace(self, key, value, time=0, min_compress_len=0, namespace=None):
     """Replaces a key's value, failing if item isn't already in memcache.
 
     Args:
@@ -568,14 +587,16 @@
         memory pressure.  Float values will be rounded up to the nearest
         whole second.
       min_compress_len: Ignored option for compatibility.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       True if replaced.  False on RPC error or cache miss.
     """
     return self._set_with_policy(MemcacheSetRequest.REPLACE,
-                                 key, value, time=time)
+                                 key, value, time=time, namespace=namespace)
 
-  def _set_with_policy(self, policy, key, value, time=0):
+  def _set_with_policy(self, policy, key, value, time=0, namespace=None):
     """Sets a single key with a specified policy.
 
     Helper function for set(), add(), and replace().
@@ -585,6 +606,8 @@
       key: Key to add, set, or replace.  See docs on Client for details.
       value: Value to set.
       time: Expiration time, defaulting to 0 (never expiring).
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       True if stored, False on RPC error or policy error, e.g. a replace
@@ -604,6 +627,7 @@
     item.set_flags(flags)
     item.set_set_policy(policy)
     item.set_expiration_time(int(math.ceil(time)))
+    namespace_manager._add_name_space(request, namespace)
     response = MemcacheSetResponse()
     try:
       self._make_sync_call('memcache', 'Set', request, response)
@@ -613,7 +637,8 @@
       return False
     return response.set_status(0) == MemcacheSetResponse.STORED
 
-  def _set_multi_with_policy(self, policy, mapping, time=0, key_prefix=''):
+  def _set_multi_with_policy(self, policy, mapping, time=0, key_prefix='',
+                             namespace=None):
     """Set multiple keys with a specified policy.
 
     Helper function for set_multi(), add_multi(), and replace_multi(). This
@@ -628,6 +653,8 @@
         memory pressure.  Float values will be rounded up to the nearest
         whole second.
       key_prefix: Prefix to prepend to all keys.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       A list of keys whose values were NOT set.  On total success,
@@ -654,6 +681,7 @@
       item.set_flags(flags)
       item.set_set_policy(policy)
       item.set_expiration_time(int(math.ceil(time)))
+    namespace_manager._add_name_space(request, namespace)
 
     response = MemcacheSetResponse()
     try:
@@ -670,7 +698,8 @@
 
     return unset_list
 
-  def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
+  def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
+                namespace=None):
     """Set multiple keys' values at once, regardless of previous contents.
 
     Args:
@@ -682,15 +711,19 @@
         whole second.
       key_prefix: Prefix to prepend to all keys.
       min_compress_len: Unimplemented compatibility option.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       A list of keys whose values were NOT set.  On total success,
       this list should be empty.
     """
     return self._set_multi_with_policy(MemcacheSetRequest.SET, mapping,
-                                       time=time, key_prefix=key_prefix)
+                                       time=time, key_prefix=key_prefix,
+                                       namespace=namespace)
 
-  def add_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
+  def add_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
+                namespace=None):
     """Set multiple keys' values iff items are not already in memcache.
 
     Args:
@@ -702,15 +735,19 @@
         whole second.
       key_prefix: Prefix to prepend to all keys.
       min_compress_len: Unimplemented compatibility option.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       A list of keys whose values were NOT set because they did not already
       exist in memcache.  On total success, this list should be empty.
     """
     return self._set_multi_with_policy(MemcacheSetRequest.ADD, mapping,
-                                       time=time, key_prefix=key_prefix)
+                                       time=time, key_prefix=key_prefix,
+                                       namespace=namespace)
 
-  def replace_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
+  def replace_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
+                    namespace=None):
     """Replace multiple keys' values, failing if the items aren't in memcache.
 
     Args:
@@ -722,15 +759,18 @@
         whole second.
       key_prefix: Prefix to prepend to all keys.
       min_compress_len: Unimplemented compatibility option.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       A list of keys whose values were NOT set because they already existed
       in memcache.  On total success, this list should be empty.
     """
     return self._set_multi_with_policy(MemcacheSetRequest.REPLACE, mapping,
-                                       time=time, key_prefix=key_prefix)
+                                       time=time, key_prefix=key_prefix,
+                                       namespace=namespace)
 
-  def incr(self, key, delta=1):
+  def incr(self, key, delta=1, namespace=None):
     """Atomically increments a key's value.
 
     Internally, the value is an unsigned 64-bit integer.  Memcache
@@ -746,6 +786,8 @@
       key: Key to increment.  See Client's docstring for details.
       delta: Non-negative integer value (int or long) to increment key by,
         defaulting to 1.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       New long integer value, or None if key was not in the cache, could not
@@ -756,9 +798,9 @@
       ValueError: If number is negative.
       TypeError: If delta isn't an int or long.
     """
-    return self._incrdecr(key, False, delta)
+    return self._incrdecr(key, False, delta, namespace=namespace)
 
-  def decr(self, key, delta=1):
+  def decr(self, key, delta=1, namespace=None):
     """Atomically decrements a key's value.
 
     Internally, the value is an unsigned 64-bit integer.  Memcache
@@ -771,6 +813,8 @@
       key: Key to decrement.  See Client's docstring for details.
       delta: Non-negative integer value (int or long) to decrement key by,
         defaulting to 1.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       New long integer value, or None if key wasn't in cache and couldn't
@@ -780,9 +824,9 @@
       ValueError: If number is negative.
       TypeError: If delta isn't an int or long.
     """
-    return self._incrdecr(key, True, delta)
+    return self._incrdecr(key, True, delta, namespace=namespace)
 
-  def _incrdecr(self, key, is_negative, delta):
+  def _incrdecr(self, key, is_negative, delta, namespace=None):
     """Increment or decrement a key by a provided delta.
 
     Args:
@@ -790,6 +834,8 @@
       is_negative: Boolean, if this is a decrement.
       delta: Non-negative integer amount (int or long) to increment
         or decrement by.
+      namespace: a string specifying an optional namespace to use in
+        the request.
 
     Returns:
       New long integer value, or None on cache miss or network/RPC/server
@@ -805,6 +851,7 @@
       raise ValueError('Delta must not be negative.')
 
     request = MemcacheIncrementRequest()
+    namespace_manager._add_name_space(request, namespace)
     response = MemcacheIncrementResponse()
     request.set_key(_key_string(key))
     request.set_delta(delta)
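
A minimal sketch (not part of the changeset) of the new namespace argument on
the Client methods above: identical keys stored under different namespaces do
not collide, and omitting the argument falls back to the namespace_manager
default.

  from google.appengine.api import memcache

  client = memcache.Client()
  client.set('greeting', 'hello', namespace='tenant-a')
  client.set('greeting', 'bonjour', namespace='tenant-b')
  assert client.get('greeting', namespace='tenant-a') == 'hello'
  assert client.get('greeting', namespace='tenant-b') == 'bonjour'
  client.delete('greeting', namespace='tenant-a')
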
--- a/thirdparty/google_appengine/google/appengine/api/memcache/memcache_service_pb.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/memcache/memcache_service_pb.py	Fri Apr 24 14:16:00 2009 +0000
@@ -86,6 +86,8 @@
   _STYLE = """"""
   _STYLE_CONTENT_TYPE = """"""
 class MemcacheGetRequest(ProtocolBuffer.ProtocolMessage):
+  has_name_space_ = 0
+  name_space_ = ""
 
   def __init__(self, contents=None):
     self.key_ = []
@@ -106,16 +108,32 @@
   def clear_key(self):
     self.key_ = []
 
+  def name_space(self): return self.name_space_
+
+  def set_name_space(self, x):
+    self.has_name_space_ = 1
+    self.name_space_ = x
+
+  def clear_name_space(self):
+    if self.has_name_space_:
+      self.has_name_space_ = 0
+      self.name_space_ = ""
+
+  def has_name_space(self): return self.has_name_space_
+
 
   def MergeFrom(self, x):
     assert x is not self
     for i in xrange(x.key_size()): self.add_key(x.key(i))
+    if (x.has_name_space()): self.set_name_space(x.name_space())
 
   def Equals(self, x):
     if x is self: return 1
     if len(self.key_) != len(x.key_): return 0
     for e1, e2 in zip(self.key_, x.key_):
       if e1 != e2: return 0
+    if self.has_name_space_ != x.has_name_space_: return 0
+    if self.has_name_space_ and self.name_space_ != x.name_space_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -126,15 +144,20 @@
     n = 0
     n += 1 * len(self.key_)
     for i in xrange(len(self.key_)): n += self.lengthString(len(self.key_[i]))
+    if (self.has_name_space_): n += 1 + self.lengthString(len(self.name_space_))
     return n + 0
 
   def Clear(self):
     self.clear_key()
+    self.clear_name_space()
 
   def OutputUnchecked(self, out):
     for i in xrange(len(self.key_)):
       out.putVarInt32(10)
       out.putPrefixedString(self.key_[i])
+    if (self.has_name_space_):
+      out.putVarInt32(18)
+      out.putPrefixedString(self.name_space_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -142,6 +165,9 @@
       if tt == 10:
         self.add_key(d.getPrefixedString())
         continue
+      if tt == 18:
+        self.set_name_space(d.getPrefixedString())
+        continue
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
       d.skipData(tt)
 
@@ -154,19 +180,24 @@
       if printElemNumber: elm="(%d)" % cnt
       res+=prefix+("key%s: %s\n" % (elm, self.DebugFormatString(e)))
       cnt+=1
+    if self.has_name_space_: res+=prefix+("name_space: %s\n" % self.DebugFormatString(self.name_space_))
     return res
 
   kkey = 1
+  kname_space = 2
 
   _TEXT = (
    "ErrorCode",
    "key",
+   "name_space",
   )
 
   _TYPES = (
    ProtocolBuffer.Encoder.NUMERIC,
    ProtocolBuffer.Encoder.STRING,
 
+   ProtocolBuffer.Encoder.STRING,
+
   )
 
   _STYLE = """"""
@@ -592,6 +623,8 @@
   def SetPolicy_Name(cls, x): return cls._SetPolicy_NAMES.get(x, "")
   SetPolicy_Name = classmethod(SetPolicy_Name)
 
+  has_name_space_ = 0
+  name_space_ = ""
 
   def __init__(self, contents=None):
     self.item_ = []
@@ -613,16 +646,32 @@
 
   def clear_item(self):
     self.item_ = []
+  def name_space(self): return self.name_space_
+
+  def set_name_space(self, x):
+    self.has_name_space_ = 1
+    self.name_space_ = x
+
+  def clear_name_space(self):
+    if self.has_name_space_:
+      self.has_name_space_ = 0
+      self.name_space_ = ""
+
+  def has_name_space(self): return self.has_name_space_
+
 
   def MergeFrom(self, x):
     assert x is not self
     for i in xrange(x.item_size()): self.add_item().CopyFrom(x.item(i))
+    if (x.has_name_space()): self.set_name_space(x.name_space())
 
   def Equals(self, x):
     if x is self: return 1
     if len(self.item_) != len(x.item_): return 0
     for e1, e2 in zip(self.item_, x.item_):
       if e1 != e2: return 0
+    if self.has_name_space_ != x.has_name_space_: return 0
+    if self.has_name_space_ and self.name_space_ != x.name_space_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -635,16 +684,21 @@
     n = 0
     n += 2 * len(self.item_)
     for i in xrange(len(self.item_)): n += self.item_[i].ByteSize()
+    if (self.has_name_space_): n += 1 + self.lengthString(len(self.name_space_))
     return n + 0
 
   def Clear(self):
     self.clear_item()
+    self.clear_name_space()
 
   def OutputUnchecked(self, out):
     for i in xrange(len(self.item_)):
       out.putVarInt32(11)
       self.item_[i].OutputUnchecked(out)
       out.putVarInt32(12)
+    if (self.has_name_space_):
+      out.putVarInt32(58)
+      out.putPrefixedString(self.name_space_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -652,6 +706,9 @@
       if tt == 11:
         self.add_item().TryMerge(d)
         continue
+      if tt == 58:
+        self.set_name_space(d.getPrefixedString())
+        continue
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
       d.skipData(tt)
 
@@ -666,6 +723,7 @@
       res+=e.__str__(prefix + "  ", printElemNumber)
       res+=prefix+"}\n"
       cnt+=1
+    if self.has_name_space_: res+=prefix+("name_space: %s\n" % self.DebugFormatString(self.name_space_))
     return res
 
   kItemGroup = 1
@@ -674,6 +732,7 @@
   kItemflags = 4
   kItemset_policy = 5
   kItemexpiration_time = 6
+  kname_space = 7
 
   _TEXT = (
    "ErrorCode",
@@ -683,6 +742,7 @@
    "flags",
    "set_policy",
    "expiration_time",
+   "name_space",
   )
 
   _TYPES = (
@@ -699,6 +759,8 @@
 
    ProtocolBuffer.Encoder.FLOAT,
 
+   ProtocolBuffer.Encoder.STRING,
+
   )
 
   _STYLE = """"""
@@ -898,6 +960,8 @@
     return res
 
 class MemcacheDeleteRequest(ProtocolBuffer.ProtocolMessage):
+  has_name_space_ = 0
+  name_space_ = ""
 
   def __init__(self, contents=None):
     self.item_ = []
@@ -919,16 +983,32 @@
 
   def clear_item(self):
     self.item_ = []
+  def name_space(self): return self.name_space_
+
+  def set_name_space(self, x):
+    self.has_name_space_ = 1
+    self.name_space_ = x
+
+  def clear_name_space(self):
+    if self.has_name_space_:
+      self.has_name_space_ = 0
+      self.name_space_ = ""
+
+  def has_name_space(self): return self.has_name_space_
+
 
   def MergeFrom(self, x):
     assert x is not self
     for i in xrange(x.item_size()): self.add_item().CopyFrom(x.item(i))
+    if (x.has_name_space()): self.set_name_space(x.name_space())
 
   def Equals(self, x):
     if x is self: return 1
     if len(self.item_) != len(x.item_): return 0
     for e1, e2 in zip(self.item_, x.item_):
       if e1 != e2: return 0
+    if self.has_name_space_ != x.has_name_space_: return 0
+    if self.has_name_space_ and self.name_space_ != x.name_space_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -941,16 +1021,21 @@
     n = 0
     n += 2 * len(self.item_)
     for i in xrange(len(self.item_)): n += self.item_[i].ByteSize()
+    if (self.has_name_space_): n += 1 + self.lengthString(len(self.name_space_))
     return n + 0
 
   def Clear(self):
     self.clear_item()
+    self.clear_name_space()
 
   def OutputUnchecked(self, out):
     for i in xrange(len(self.item_)):
       out.putVarInt32(11)
       self.item_[i].OutputUnchecked(out)
       out.putVarInt32(12)
+    if (self.has_name_space_):
+      out.putVarInt32(34)
+      out.putPrefixedString(self.name_space_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -958,6 +1043,9 @@
       if tt == 11:
         self.add_item().TryMerge(d)
         continue
+      if tt == 34:
+        self.set_name_space(d.getPrefixedString())
+        continue
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
       d.skipData(tt)
 
@@ -972,17 +1060,20 @@
       res+=e.__str__(prefix + "  ", printElemNumber)
       res+=prefix+"}\n"
       cnt+=1
+    if self.has_name_space_: res+=prefix+("name_space: %s\n" % self.DebugFormatString(self.name_space_))
     return res
 
   kItemGroup = 1
   kItemkey = 2
   kItemdelete_time = 3
+  kname_space = 4
 
   _TEXT = (
    "ErrorCode",
    "Item",
    "key",
    "delete_time",
+   "name_space",
   )
 
   _TYPES = (
@@ -993,6 +1084,8 @@
 
    ProtocolBuffer.Encoder.FLOAT,
 
+   ProtocolBuffer.Encoder.STRING,
+
   )
 
   _STYLE = """"""
@@ -1110,6 +1203,8 @@
 
   has_key_ = 0
   key_ = ""
+  has_name_space_ = 0
+  name_space_ = ""
   has_delta_ = 0
   delta_ = 1
   has_direction_ = 0
@@ -1131,6 +1226,19 @@
 
   def has_key(self): return self.has_key_
 
+  def name_space(self): return self.name_space_
+
+  def set_name_space(self, x):
+    self.has_name_space_ = 1
+    self.name_space_ = x
+
+  def clear_name_space(self):
+    if self.has_name_space_:
+      self.has_name_space_ = 0
+      self.name_space_ = ""
+
+  def has_name_space(self): return self.has_name_space_
+
   def delta(self): return self.delta_
 
   def set_delta(self, x):
@@ -1161,6 +1269,7 @@
   def MergeFrom(self, x):
     assert x is not self
     if (x.has_key()): self.set_key(x.key())
+    if (x.has_name_space()): self.set_name_space(x.name_space())
     if (x.has_delta()): self.set_delta(x.delta())
     if (x.has_direction()): self.set_direction(x.direction())
 
@@ -1168,6 +1277,8 @@
     if x is self: return 1
     if self.has_key_ != x.has_key_: return 0
     if self.has_key_ and self.key_ != x.key_: return 0
+    if self.has_name_space_ != x.has_name_space_: return 0
+    if self.has_name_space_ and self.name_space_ != x.name_space_: return 0
     if self.has_delta_ != x.has_delta_: return 0
     if self.has_delta_ and self.delta_ != x.delta_: return 0
     if self.has_direction_ != x.has_direction_: return 0
@@ -1185,12 +1296,14 @@
   def ByteSize(self):
     n = 0
     n += self.lengthString(len(self.key_))
+    if (self.has_name_space_): n += 1 + self.lengthString(len(self.name_space_))
     if (self.has_delta_): n += 1 + self.lengthVarInt64(self.delta_)
     if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
     return n + 1
 
   def Clear(self):
     self.clear_key()
+    self.clear_name_space()
     self.clear_delta()
     self.clear_direction()
 
@@ -1203,6 +1316,9 @@
     if (self.has_direction_):
       out.putVarInt32(24)
       out.putVarInt32(self.direction_)
+    if (self.has_name_space_):
+      out.putVarInt32(34)
+      out.putPrefixedString(self.name_space_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -1216,6 +1332,9 @@
       if tt == 24:
         self.set_direction(d.getVarInt32())
         continue
+      if tt == 34:
+        self.set_name_space(d.getPrefixedString())
+        continue
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
       d.skipData(tt)
 
@@ -1223,11 +1342,13 @@
   def __str__(self, prefix="", printElemNumber=0):
     res=""
     if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_))
+    if self.has_name_space_: res+=prefix+("name_space: %s\n" % self.DebugFormatString(self.name_space_))
     if self.has_delta_: res+=prefix+("delta: %s\n" % self.DebugFormatInt64(self.delta_))
     if self.has_direction_: res+=prefix+("direction: %s\n" % self.DebugFormatInt32(self.direction_))
     return res
 
   kkey = 1
+  kname_space = 4
   kdelta = 2
   kdirection = 3
 
@@ -1236,6 +1357,7 @@
    "key",
    "delta",
    "direction",
+   "name_space",
   )
 
   _TYPES = (
@@ -1246,6 +1368,8 @@
 
    ProtocolBuffer.Encoder.NUMERIC,
 
+   ProtocolBuffer.Encoder.STRING,
+
   )
 
   _STYLE = """"""
--- a/thirdparty/google_appengine/google/appengine/api/memcache/memcache_stub.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/memcache/memcache_stub.py	Fri Apr 24 14:16:00 2009 +0000
@@ -119,23 +119,27 @@
     self._byte_hits = 0
     self._cache_creation_time = self._gettime()
 
-  def _GetKey(self, key):
+  def _GetKey(self, namespace, key):
     """Retrieves a CacheEntry from the cache if it hasn't expired.
 
     Does not take deletion timeout into account.
 
     Args:
+      namespace: The namespace that keys are stored under.
       key: The key to retrieve from the cache.
 
     Returns:
       The corresponding CacheEntry instance, or None if it was not found or
       has already expired.
     """
-    entry = self._the_cache.get(key, None)
+    namespace_dict = self._the_cache.get(namespace, None)
+    if namespace_dict is None:
+      return None
+    entry = namespace_dict.get(key, None)
     if entry is None:
       return None
     elif entry.CheckExpired():
-      del self._the_cache[key]
+      del namespace_dict[key]
       return None
     else:
       return entry
@@ -147,9 +151,10 @@
       request: A MemcacheGetRequest.
       response: A MemcacheGetResponse.
     """
+    namespace = request.name_space()
     keys = set(request.key_list())
     for key in keys:
-      entry = self._GetKey(key)
+      entry = self._GetKey(namespace, key)
       if entry is None or entry.CheckLocked():
         self._misses += 1
         continue
@@ -167,10 +172,11 @@
       request: A MemcacheSetRequest.
       response: A MemcacheSetResponse.
     """
+    namespace = request.name_space()
     for item in request.item_list():
       key = item.key()
       set_policy = item.set_policy()
-      old_entry = self._GetKey(key)
+      old_entry = self._GetKey(namespace, key)
 
       set_status = MemcacheSetResponse.NOT_STORED
       if ((set_policy == MemcacheSetRequest.SET) or
@@ -180,10 +186,12 @@
         if (old_entry is None or
             set_policy == MemcacheSetRequest.SET
             or not old_entry.CheckLocked()):
-          self._the_cache[key] = CacheEntry(item.value(),
-                                            item.expiration_time(),
-                                            item.flags(),
-                                            gettime=self._gettime)
+          if namespace not in self._the_cache:
+            self._the_cache[namespace] = {}
+          self._the_cache[namespace][key] = CacheEntry(item.value(),
+                                                       item.expiration_time(),
+                                                       item.flags(),
+                                                       gettime=self._gettime)
           set_status = MemcacheSetResponse.STORED
 
       response.add_set_status(set_status)
@@ -195,15 +203,16 @@
       request: A MemcacheDeleteRequest.
       response: A MemcacheDeleteResponse.
     """
+    namespace = request.name_space()
     for item in request.item_list():
       key = item.key()
-      entry = self._GetKey(key)
+      entry = self._GetKey(namespace, key)
 
       delete_status = MemcacheDeleteResponse.DELETED
       if entry is None:
         delete_status = MemcacheDeleteResponse.NOT_FOUND
       elif item.delete_time() == 0:
-        del self._the_cache[key]
+        del self._the_cache[namespace][key]
       else:
         entry.ExpireAndLock(item.delete_time())
 
@@ -216,8 +225,9 @@
       request: A MemcacheIncrementRequest.
       response: A MemcacheIncrementResponse.
     """
+    namespace = request.name_space()
     key = request.key()
-    entry = self._GetKey(key)
+    entry = self._GetKey(namespace, key)
     if entry is None:
       return
 
@@ -225,7 +235,7 @@
       old_value = long(entry.value)
       if old_value < 0:
         raise ValueError
-    except ValueError, e:
+    except ValueError:
       logging.error('Increment/decrement failed: Could not interpret '
                     'value for key = "%s" as an unsigned integer.', key)
       return
@@ -262,11 +272,13 @@
     stats.set_hits(self._hits)
     stats.set_misses(self._misses)
     stats.set_byte_hits(self._byte_hits)
-    stats.set_items(len(self._the_cache))
-
+    items = 0
     total_bytes = 0
-    for key, entry in self._the_cache.iteritems():
-      total_bytes += len(entry.value)
+    for namespace in self._the_cache.itervalues():
+      items += len(namespace)
+      for entry in namespace.itervalues():
+        total_bytes += len(entry.value)
+    stats.set_items(items)
     stats.set_bytes(total_bytes)
 
     stats.set_oldest_item_age(self._gettime() - self._cache_creation_time)
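
A small sketch (not part of the changeset) of the stub's new storage layout: a
dict of per-namespace dicts, so the same key can hold independent entries in
different namespaces.

  the_cache = {}

  def put(namespace, key, entry):
    the_cache.setdefault(namespace, {})[key] = entry

  def get(namespace, key):
    return the_cache.get(namespace, {}).get(key)

  put('', 'counter', 1)
  put('tenant-a', 'counter', 99)
  assert get('', 'counter') == 1
  assert get('tenant-a', 'counter') == 99
  assert get('tenant-b', 'counter') is None
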
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/namespace_manager/__init__.py	Fri Apr 24 14:16:00 2009 +0000
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Control the namespacing system used by various APIs.
+
+Each API call can specify an alternate namespace, but the functions
+here can be used to change the default namespace. The default is set
+before user code begins executing.
+"""
+
+
+
+import os
+
+ENV_DEFAULT_NAMESPACE = 'HTTP_X_APPENGINE_DEFAULT_NAMESPACE'
+
+__default_namespace = None
+
+def set_request_namespace(namespace):
+  """Set the default namespace to use for future calls, for this request only.
+
+  Args:
+    namespace: A string naming the new namespace to use. The empty
+      string specifies the root namespace for this app.
+  """
+  global __default_namespace
+  __default_namespace = namespace
+
+
+def get_request_namespace():
+  """Get the name of the current default namespace. The empty string
+  indicates that the root namespace is the default."""
+  global __default_namespace
+  if __default_namespace is None:
+    if ENV_DEFAULT_NAMESPACE in os.environ:
+      __default_namespace = os.environ[ENV_DEFAULT_NAMESPACE]
+    else:
+      __default_namespace = ''
+  return __default_namespace
+
+
+def _add_name_space(request, namespace=None):
+  """Add a name_space field to a request.
+
+  Args:
+    request: A protocol buffer supporting the set_name_space() operation.
+    namespace: The name of the namespace part. If None, use the
+      default namespace.
+  """
+  if namespace is None:
+    request.set_name_space(get_request_namespace())
+  else:
+    request.set_name_space(namespace)
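
How the memcache client uses this module, as a short sketch (not part of the
changeset): an explicit namespace argument wins, otherwise the per-request
default (seeded from HTTP_X_APPENGINE_DEFAULT_NAMESPACE) is applied.

  from google.appengine.api import namespace_manager
  from google.appengine.api.memcache import memcache_service_pb

  namespace_manager.set_request_namespace('tenant-a')

  request = memcache_service_pb.MemcacheGetRequest()
  namespace_manager._add_name_space(request)              # default -> 'tenant-a'
  assert request.name_space() == 'tenant-a'

  namespace_manager._add_name_space(request, 'tenant-b')  # explicit override
  assert request.name_space() == 'tenant-b'
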
--- a/thirdparty/google_appengine/google/appengine/api/urlfetch_service_pb.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/urlfetch_service_pb.py	Fri Apr 24 14:16:00 2009 +0000
@@ -22,7 +22,7 @@
 __pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
                    unusednames=printElemNumber,debug_strs no-special"""
 
-from google.appengine.api.api_base_pb import StringProto
+from google.appengine.api.api_base_pb import *
 class URLFetchServiceError(ProtocolBuffer.ProtocolMessage):
 
   OK           =    0
--- a/thirdparty/google_appengine/google/appengine/api/urlfetch_stub.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/urlfetch_stub.py	Fri Apr 24 14:16:00 2009 +0000
@@ -55,7 +55,6 @@
   'content-length',
   'host',
   'referer',
-  'user-agent',
   'vary',
   'via',
   'x-forwarded-for',
@@ -104,6 +103,11 @@
       raise apiproxy_errors.ApplicationError(
         urlfetch_service_pb.URLFetchServiceError.INVALID_URL)
 
+    if not host:
+      logging.error('Missing host.')
+      raise apiproxy_errors.ApplicationError(
+          urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR)
+
     sanitized_headers = self._SanitizeHttpHeaders(_UNTRUSTED_REQUEST_HEADERS,
                                                   request.header_list())
     request.clear_header()
@@ -146,7 +150,12 @@
           'urlfetch received %s ; port %s is not allowed in production!' %
           (url, port))
 
-      if host == '' and protocol == '':
+      if protocol and not host:
+        logging.error('Missing host on redirect; target url is %s' % url)
+        raise apiproxy_errors.ApplicationError(
+          urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR)
+
+      if not host and not protocol:
         host = last_host
         protocol = last_protocol
 
--- a/thirdparty/google_appengine/google/appengine/api/users.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/api/users.py	Fri Apr 24 14:16:00 2009 +0000
@@ -33,7 +33,6 @@
 import os
 from google.appengine.api import apiproxy_stub_map
 from google.appengine.api import user_service_pb
-from google.appengine.api import api_base_pb
 from google.appengine.runtime import apiproxy_errors
 
 
@@ -50,6 +49,7 @@
   """Raised by UserService calls if the generated redirect URL was too long.
   """
 
+
 class NotAllowedError(Error):
   """Raised by UserService calls if the requested redirect URL is not allowed.
   """
@@ -58,7 +58,7 @@
 class User(object):
   """A user.
 
-  We provide here the email address, nickname, and auth domain for a user.
+  We provide the email address, nickname, auth domain, and id for a user.
 
   A nickname is a human-readable string which uniquely identifies a Google
   user, akin to a username. It will be an email address for some users, but
@@ -66,12 +66,18 @@
   """
 
 
-  def __init__(self, email=None, _auth_domain=None):
+  __user_id = None
+
+  def __init__(self, email=None, _auth_domain=None, _user_id=None):
     """Constructor.
 
     Args:
-      # email is optional. it defaults to the current user.
-      email: string
+      email: An optional string of the user's email address. It defaults to
+          the current user's email address.
+
+    Raises:
+      UserNotFoundError: Raised if the user is not logged in and the email
+          argument is empty.
     """
     if _auth_domain is None:
       _auth_domain = os.environ.get('AUTH_DOMAIN')
@@ -83,12 +89,15 @@
     if email is None:
       assert 'USER_EMAIL' in os.environ
       email = os.environ['USER_EMAIL']
+      if _user_id is None and 'USER_ID' in os.environ:
+        _user_id = os.environ['USER_ID']
 
     if not email:
       raise UserNotFoundError
 
     self.__email = email
     self.__auth_domain = _auth_domain
+    self.__user_id = _user_id or None
 
   def nickname(self):
     """Return this user's nickname.
@@ -108,6 +117,13 @@
     """Return this user's email address."""
     return self.__email
 
+  def user_id(self):
+    """Return either a permanent unique identifying string or None.
+
+    If the email address was set explicitly, this will return None.
+    """
+    return self.__user_id
+
   def auth_domain(self):
     """Return this user's auth domain."""
     return self.__auth_domain
@@ -119,7 +135,11 @@
     return str(self.nickname())
 
   def __repr__(self):
-    return "users.User(email='%s')" % self.email()
+    if self.__user_id:
+      return "users.User(email='%s',_user_id='%s')" % (self.email(),
+                                                       self.user_id())
+    else:
+      return "users.User(email='%s')" % self.email()
 
   def __hash__(self):
     return hash((self.__email, self.__auth_domain))
@@ -152,7 +172,7 @@
         user_service_pb.UserServiceError.REDIRECT_URL_TOO_LONG):
       raise RedirectTooLongError
     elif (e.application_error ==
-        user_service_pb.UserServiceError.NOT_ALLOWED):
+          user_service_pb.UserServiceError.NOT_ALLOWED):
       raise NotAllowedError
     else:
       raise e
@@ -205,6 +225,6 @@
   the User class, because admin status is not persisted in the datastore. It
   only exists for the user making this request right now.
   """
-  return (os.environ.get('USER_IS_ADMIN', '0')) == "1"
+  return (os.environ.get('USER_IS_ADMIN', '0')) == '1'
 
 IsCurrentUserAdmin = is_current_user_admin
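
A short sketch (not part of the changeset) of the new user_id() accessor: the
value is stable across email changes, so it makes a better datastore key than
the email address, and it is None for User objects constructed by hand from an
email string.

  from google.appengine.api import users

  user = users.get_current_user()
  if user:
    key_name = user.user_id() or user.email()  # fall back for hand-built users
    print('Stable key for %s: %s' % (user.nickname(), key_name))
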
--- a/thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/datastore/datastore_pb.py	Fri Apr 24 14:16:00 2009 +0000
@@ -1183,6 +1183,12 @@
 class Cost(ProtocolBuffer.ProtocolMessage):
   has_index_writes_ = 0
   index_writes_ = 0
+  has_index_write_bytes_ = 0
+  index_write_bytes_ = 0
+  has_entity_writes_ = 0
+  entity_writes_ = 0
+  has_entity_write_bytes_ = 0
+  entity_write_bytes_ = 0
 
   def __init__(self, contents=None):
     if contents is not None: self.MergeFromString(contents)
@@ -1200,15 +1206,63 @@
 
   def has_index_writes(self): return self.has_index_writes_
 
+  def index_write_bytes(self): return self.index_write_bytes_
+
+  def set_index_write_bytes(self, x):
+    self.has_index_write_bytes_ = 1
+    self.index_write_bytes_ = x
+
+  def clear_index_write_bytes(self):
+    if self.has_index_write_bytes_:
+      self.has_index_write_bytes_ = 0
+      self.index_write_bytes_ = 0
+
+  def has_index_write_bytes(self): return self.has_index_write_bytes_
+
+  def entity_writes(self): return self.entity_writes_
+
+  def set_entity_writes(self, x):
+    self.has_entity_writes_ = 1
+    self.entity_writes_ = x
+
+  def clear_entity_writes(self):
+    if self.has_entity_writes_:
+      self.has_entity_writes_ = 0
+      self.entity_writes_ = 0
+
+  def has_entity_writes(self): return self.has_entity_writes_
+
+  def entity_write_bytes(self): return self.entity_write_bytes_
+
+  def set_entity_write_bytes(self, x):
+    self.has_entity_write_bytes_ = 1
+    self.entity_write_bytes_ = x
+
+  def clear_entity_write_bytes(self):
+    if self.has_entity_write_bytes_:
+      self.has_entity_write_bytes_ = 0
+      self.entity_write_bytes_ = 0
+
+  def has_entity_write_bytes(self): return self.has_entity_write_bytes_
+
 
   def MergeFrom(self, x):
     assert x is not self
     if (x.has_index_writes()): self.set_index_writes(x.index_writes())
+    if (x.has_index_write_bytes()): self.set_index_write_bytes(x.index_write_bytes())
+    if (x.has_entity_writes()): self.set_entity_writes(x.entity_writes())
+    if (x.has_entity_write_bytes()): self.set_entity_write_bytes(x.entity_write_bytes())
 
   def Equals(self, x):
     if x is self: return 1
     if self.has_index_writes_ != x.has_index_writes_: return 0
     if self.has_index_writes_ and self.index_writes_ != x.index_writes_: return 0
+    if self.has_index_write_bytes_ != x.has_index_write_bytes_: return 0
+    if self.has_index_write_bytes_ and self.index_write_bytes_ != x.index_write_bytes_: return 0
+    if self.has_entity_writes_ != x.has_entity_writes_: return 0
+    if self.has_entity_writes_ and self.entity_writes_ != x.entity_writes_: return 0
+    if self.has_entity_write_bytes_ != x.has_entity_write_bytes_: return 0
+    if self.has_entity_write_bytes_ and self.entity_write_bytes_ != x.entity_write_bytes_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -1218,15 +1272,30 @@
   def ByteSize(self):
     n = 0
     if (self.has_index_writes_): n += 1 + self.lengthVarInt64(self.index_writes_)
+    if (self.has_index_write_bytes_): n += 1 + self.lengthVarInt64(self.index_write_bytes_)
+    if (self.has_entity_writes_): n += 1 + self.lengthVarInt64(self.entity_writes_)
+    if (self.has_entity_write_bytes_): n += 1 + self.lengthVarInt64(self.entity_write_bytes_)
     return n + 0
 
   def Clear(self):
     self.clear_index_writes()
+    self.clear_index_write_bytes()
+    self.clear_entity_writes()
+    self.clear_entity_write_bytes()
 
   def OutputUnchecked(self, out):
     if (self.has_index_writes_):
       out.putVarInt32(8)
       out.putVarInt32(self.index_writes_)
+    if (self.has_index_write_bytes_):
+      out.putVarInt32(16)
+      out.putVarInt32(self.index_write_bytes_)
+    if (self.has_entity_writes_):
+      out.putVarInt32(24)
+      out.putVarInt32(self.entity_writes_)
+    if (self.has_entity_write_bytes_):
+      out.putVarInt32(32)
+      out.putVarInt32(self.entity_write_bytes_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -1234,6 +1303,15 @@
       if tt == 8:
         self.set_index_writes(d.getVarInt32())
         continue
+      if tt == 16:
+        self.set_index_write_bytes(d.getVarInt32())
+        continue
+      if tt == 24:
+        self.set_entity_writes(d.getVarInt32())
+        continue
+      if tt == 32:
+        self.set_entity_write_bytes(d.getVarInt32())
+        continue
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
       d.skipData(tt)
 
@@ -1241,19 +1319,34 @@
   def __str__(self, prefix="", printElemNumber=0):
     res=""
     if self.has_index_writes_: res+=prefix+("index_writes: %s\n" % self.DebugFormatInt32(self.index_writes_))
+    if self.has_index_write_bytes_: res+=prefix+("index_write_bytes: %s\n" % self.DebugFormatInt32(self.index_write_bytes_))
+    if self.has_entity_writes_: res+=prefix+("entity_writes: %s\n" % self.DebugFormatInt32(self.entity_writes_))
+    if self.has_entity_write_bytes_: res+=prefix+("entity_write_bytes: %s\n" % self.DebugFormatInt32(self.entity_write_bytes_))
     return res
 
   kindex_writes = 1
+  kindex_write_bytes = 2
+  kentity_writes = 3
+  kentity_write_bytes = 4
 
   _TEXT = (
    "ErrorCode",
    "index_writes",
+   "index_write_bytes",
+   "entity_writes",
+   "entity_write_bytes",
   )
 
   _TYPES = (
    ProtocolBuffer.Encoder.NUMERIC,
    ProtocolBuffer.Encoder.NUMERIC,
 
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
+   ProtocolBuffer.Encoder.NUMERIC,
+
   )
 
   _STYLE = """"""
--- a/thirdparty/google_appengine/google/appengine/datastore/entity_pb.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/datastore/entity_pb.py	Fri Apr 24 14:16:00 2009 +0000
@@ -247,6 +247,8 @@
   nickname_ = ""
   has_gaiaid_ = 0
   gaiaid_ = 0
+  has_obfuscated_gaiaid_ = 0
+  obfuscated_gaiaid_ = ""
 
   def __init__(self, contents=None):
     if contents is not None: self.MergeFromString(contents)
@@ -303,6 +305,19 @@
 
   def has_gaiaid(self): return self.has_gaiaid_
 
+  def obfuscated_gaiaid(self): return self.obfuscated_gaiaid_
+
+  def set_obfuscated_gaiaid(self, x):
+    self.has_obfuscated_gaiaid_ = 1
+    self.obfuscated_gaiaid_ = x
+
+  def clear_obfuscated_gaiaid(self):
+    if self.has_obfuscated_gaiaid_:
+      self.has_obfuscated_gaiaid_ = 0
+      self.obfuscated_gaiaid_ = ""
+
+  def has_obfuscated_gaiaid(self): return self.has_obfuscated_gaiaid_
+
 
   def MergeFrom(self, x):
     assert x is not self
@@ -310,6 +325,7 @@
     if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())
     if (x.has_nickname()): self.set_nickname(x.nickname())
     if (x.has_gaiaid()): self.set_gaiaid(x.gaiaid())
+    if (x.has_obfuscated_gaiaid()): self.set_obfuscated_gaiaid(x.obfuscated_gaiaid())
 
   def Equals(self, x):
     if x is self: return 1
@@ -321,6 +337,8 @@
     if self.has_nickname_ and self.nickname_ != x.nickname_: return 0
     if self.has_gaiaid_ != x.has_gaiaid_: return 0
     if self.has_gaiaid_ and self.gaiaid_ != x.gaiaid_: return 0
+    if self.has_obfuscated_gaiaid_ != x.has_obfuscated_gaiaid_: return 0
+    if self.has_obfuscated_gaiaid_ and self.obfuscated_gaiaid_ != x.obfuscated_gaiaid_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -345,6 +363,7 @@
     n += self.lengthString(len(self.auth_domain_))
     if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
     n += self.lengthVarInt64(self.gaiaid_)
+    if (self.has_obfuscated_gaiaid_): n += 2 + self.lengthString(len(self.obfuscated_gaiaid_))
     return n + 4
 
   def Clear(self):
@@ -352,6 +371,7 @@
     self.clear_auth_domain()
     self.clear_nickname()
     self.clear_gaiaid()
+    self.clear_obfuscated_gaiaid()
 
   def OutputUnchecked(self, out):
     out.putVarInt32(74)
@@ -363,6 +383,9 @@
       out.putPrefixedString(self.nickname_)
     out.putVarInt32(144)
     out.putVarInt64(self.gaiaid_)
+    if (self.has_obfuscated_gaiaid_):
+      out.putVarInt32(154)
+      out.putPrefixedString(self.obfuscated_gaiaid_)
 
   def TryMerge(self, d):
     while 1:
@@ -380,6 +403,9 @@
       if tt == 144:
         self.set_gaiaid(d.getVarInt64())
         continue
+      if tt == 154:
+        self.set_obfuscated_gaiaid(d.getPrefixedString())
+        continue
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
       d.skipData(tt)
 
@@ -390,6 +416,7 @@
     if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
     if self.has_nickname_: res+=prefix+("nickname: %s\n" % self.DebugFormatString(self.nickname_))
     if self.has_gaiaid_: res+=prefix+("gaiaid: %s\n" % self.DebugFormatInt64(self.gaiaid_))
+    if self.has_obfuscated_gaiaid_: res+=prefix+("obfuscated_gaiaid: %s\n" % self.DebugFormatString(self.obfuscated_gaiaid_))
     return res
 
 class PropertyValue_ReferenceValue(ProtocolBuffer.ProtocolMessage):
@@ -768,6 +795,7 @@
   kUserValueauth_domain = 10
   kUserValuenickname = 11
   kUserValuegaiaid = 18
+  kUserValueobfuscated_gaiaid = 19
   kReferenceValueGroup = 12
   kReferenceValueapp = 13
   kReferenceValuePathElementGroup = 14
@@ -795,6 +823,7 @@
    "id",
    "name",
    "gaiaid",
+   "obfuscated_gaiaid",
   )
 
   _TYPES = (
@@ -835,6 +864,8 @@
 
    ProtocolBuffer.Encoder.NUMERIC,
 
+   ProtocolBuffer.Encoder.STRING,
+
   )
 
   _STYLE = """"""
@@ -1478,6 +1509,8 @@
   nickname_ = ""
   has_gaiaid_ = 0
   gaiaid_ = 0
+  has_obfuscated_gaiaid_ = 0
+  obfuscated_gaiaid_ = ""
 
   def __init__(self, contents=None):
     if contents is not None: self.MergeFromString(contents)
@@ -1534,6 +1567,19 @@
 
   def has_gaiaid(self): return self.has_gaiaid_
 
+  def obfuscated_gaiaid(self): return self.obfuscated_gaiaid_
+
+  def set_obfuscated_gaiaid(self, x):
+    self.has_obfuscated_gaiaid_ = 1
+    self.obfuscated_gaiaid_ = x
+
+  def clear_obfuscated_gaiaid(self):
+    if self.has_obfuscated_gaiaid_:
+      self.has_obfuscated_gaiaid_ = 0
+      self.obfuscated_gaiaid_ = ""
+
+  def has_obfuscated_gaiaid(self): return self.has_obfuscated_gaiaid_
+
 
   def MergeFrom(self, x):
     assert x is not self
@@ -1541,6 +1587,7 @@
     if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())
     if (x.has_nickname()): self.set_nickname(x.nickname())
     if (x.has_gaiaid()): self.set_gaiaid(x.gaiaid())
+    if (x.has_obfuscated_gaiaid()): self.set_obfuscated_gaiaid(x.obfuscated_gaiaid())
 
   def Equals(self, x):
     if x is self: return 1
@@ -1552,6 +1599,8 @@
     if self.has_nickname_ and self.nickname_ != x.nickname_: return 0
     if self.has_gaiaid_ != x.has_gaiaid_: return 0
     if self.has_gaiaid_ and self.gaiaid_ != x.gaiaid_: return 0
+    if self.has_obfuscated_gaiaid_ != x.has_obfuscated_gaiaid_: return 0
+    if self.has_obfuscated_gaiaid_ and self.obfuscated_gaiaid_ != x.obfuscated_gaiaid_: return 0
     return 1
 
   def IsInitialized(self, debug_strs=None):
@@ -1576,6 +1625,7 @@
     n += self.lengthString(len(self.auth_domain_))
     if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
     n += self.lengthVarInt64(self.gaiaid_)
+    if (self.has_obfuscated_gaiaid_): n += 1 + self.lengthString(len(self.obfuscated_gaiaid_))
     return n + 3
 
   def Clear(self):
@@ -1583,6 +1633,7 @@
     self.clear_auth_domain()
     self.clear_nickname()
     self.clear_gaiaid()
+    self.clear_obfuscated_gaiaid()
 
   def OutputUnchecked(self, out):
     out.putVarInt32(10)
@@ -1594,6 +1645,9 @@
       out.putPrefixedString(self.nickname_)
     out.putVarInt32(32)
     out.putVarInt64(self.gaiaid_)
+    if (self.has_obfuscated_gaiaid_):
+      out.putVarInt32(42)
+      out.putPrefixedString(self.obfuscated_gaiaid_)
 
   def TryMerge(self, d):
     while d.avail() > 0:
@@ -1610,6 +1664,9 @@
       if tt == 32:
         self.set_gaiaid(d.getVarInt64())
         continue
+      if tt == 42:
+        self.set_obfuscated_gaiaid(d.getPrefixedString())
+        continue
       if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
       d.skipData(tt)
 
@@ -1620,12 +1677,14 @@
     if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
     if self.has_nickname_: res+=prefix+("nickname: %s\n" % self.DebugFormatString(self.nickname_))
     if self.has_gaiaid_: res+=prefix+("gaiaid: %s\n" % self.DebugFormatInt64(self.gaiaid_))
+    if self.has_obfuscated_gaiaid_: res+=prefix+("obfuscated_gaiaid: %s\n" % self.DebugFormatString(self.obfuscated_gaiaid_))
     return res
 
   kemail = 1
   kauth_domain = 2
   knickname = 3
   kgaiaid = 4
+  kobfuscated_gaiaid = 5
 
   _TEXT = (
    "ErrorCode",
@@ -1633,6 +1692,7 @@
    "auth_domain",
    "nickname",
    "gaiaid",
+   "obfuscated_gaiaid",
   )
 
   _TYPES = (
@@ -1645,6 +1705,8 @@
 
    ProtocolBuffer.Encoder.NUMERIC,
 
+   ProtocolBuffer.Encoder.STRING,
+
   )
 
   _STYLE = """"""
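
The hunks above add an optional obfuscated_gaiaid string alongside the existing gaiaid field (field number 19 in the first generated message shown, 5 in the second), wired through the usual has_/set_/clear_ accessor pattern. A minimal sketch of how those accessors behave follows; UserValue is only a stand-in name for whichever generated class carries the field, and the id string is a made-up example.

value = UserValue()                        # hypothetical generated message instance
assert not value.has_obfuscated_gaiaid()   # defaults to unset / ""
value.set_obfuscated_gaiaid('1234567890')  # stable, opaque id string (example value)
assert value.has_obfuscated_gaiaid()
print value.obfuscated_gaiaid()
value.clear_obfuscated_gaiaid()            # back to the empty-string default
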
--- a/thirdparty/google_appengine/google/appengine/dist/py_imp.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/py_imp.py	Fri Apr 24 14:16:00 2009 +0000
@@ -27,31 +27,88 @@
 
 
 def get_magic():
+  """Return the magic string used to recognize byte-compiled code files."""
   return '\0\0\0\0'
 
 
+_PY_SOURCE_SUFFIX = ('.py', 'U', PY_SOURCE)
+_PKG_DIRECTORY_SUFFIX = ('', '', PKG_DIRECTORY)
+
+
 def get_suffixes():
-  return [('.py', 'U', PY_SOURCE)]
+  """Return a list that describes the files that find_module() looks for."""
+  return [_PY_SOURCE_SUFFIX]
+
+
+def find_module(name, path=None):
+  """Try to find the named module on the given search path or sys.path."""
+  if path is None:
+    path = sys.path
+
+  for directory in path:
+    filename = os.path.join(directory, '%s.py' % name)
+    if os.path.exists(filename):
+      return open(filename, 'U'), filename, _PY_SOURCE_SUFFIX
+
+    dirname = os.path.join(directory, name)
+    filename = os.path.join(dirname, '__init__.py')
+    if os.path.exists(filename):
+      return None, dirname, _PKG_DIRECTORY_SUFFIX
+
+  raise ImportError('No module named %s' % name)
+
+
+def load_module(name, file_, pathname, description):
+  """Load or reload the specified module.
+
+  Please note that this function has only rudimentary support on App Engine:
+  only loading packages is supported.
+  """
+  suffix, mode, type_ = description
+  if type_ == PKG_DIRECTORY:
+    if name in sys.modules:
+      mod = sys.modules[name]
+    else:
+      mod = new_module(name)
+      sys.modules[name] = mod
+    filename = os.path.join(pathname, '__init__.py')
+    mod.__file__ = filename
+    execfile(filename, mod.__dict__, mod.__dict__)
+    return mod
+  else:
+    raise NotImplementedError('Only importing packages is supported on '
+                              'App Engine')
 
 
 def new_module(name):
-  return type(sys.modules[__name__])(name)
+  """Return a new empty module object."""
+  return type(sys)(name)
 
 
 def lock_held():
   """Return False since threading is not supported."""
   return False
 
+
 def acquire_lock():
   """Acquiring the lock is a no-op since no threading is supported."""
   pass
 
+
 def release_lock():
   """There is no lock to release since acquiring is a no-op when there is no
   threading."""
   pass
 
 
+def init_builtin(name):
+  raise NotImplementedError('This function is not supported on App Engine.')
+
+
+def init_frozen(name):
+  raise NotImplementedError('This function is not supported on App Engine.')
+
+
 def is_builtin(name):
   return name in sys.builtin_module_names
 
@@ -60,7 +117,20 @@
   return False
 
 
+def load_compiled(name, pathname, file_=None):
+  raise NotImplementedError('This function is not supported on App Engine.')
+
+
+def load_dynamic(name, pathname, file_=None):
+  raise NotImplementedError('This function is not supported on App Engine.')
+
+
+def load_source(name, pathname, file_=None):
+  raise NotImplementedError('This function is not supported on App Engine.')
+
+
 class NullImporter(object):
+  """Null importer object"""
 
   def __init__(self, path_string):
     if not path_string:
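
The rewritten imp replacement above only knows how to locate .py source files and package directories, and load_module() only handles the package case. A minimal usage sketch, assuming the module is importable as google.appengine.dist.py_imp and that a hypothetical package directory mypkg/ (containing __init__.py) sits on sys.path:

from google.appengine.dist import py_imp

file_, pathname, description = py_imp.find_module('mypkg')
if description == py_imp._PKG_DIRECTORY_SUFFIX:
    # Only the package branch is implemented; a plain PY_SOURCE module would
    # make load_module() raise NotImplementedError.
    mod = py_imp.load_module('mypkg', file_, pathname, description)
    print mod.__file__        # .../mypkg/__init__.py
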
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/thirdparty/google_appengine/google/appengine/dist/py_zipimport.py	Fri Apr 24 14:16:00 2009 +0000
@@ -0,0 +1,288 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Pure Python zipfile importer.
+
+This approximates the standard zipimport module, which isn't supported
+by Google App Engine.  See PEP 302 for more information about the API
+for import hooks.
+
+Usage:
+  import py_zipimport
+
+As a side effect of importing, the module overrides sys.path_hooks,
+and also creates an alias 'zipimport' for itself.  When your app is
+running in Google App Engine production, you don't even need to import
+it, since this is already done for you.  In the Google App Engine SDK
+this module is not used; instead, the standard zipimport module is
+used.
+"""
+
+
+__all__ = ['ZipImportError', 'zipimporter']
+
+
+import os
+import sys
+import types
+import UserDict
+import zipfile
+
+
+_SEARCH_ORDER = [
+
+    ('.py', False),
+    ('/__init__.py', True),
+]
+
+
+_zipfile_cache = {}
+
+
+class ZipImportError(ImportError):
+  """Exception raised by zipimporter objects."""
+
+
+class zipimporter:
+  """A PEP-302-style importer that can import from a zipfile.
+
+  Just insert or append this class (not an instance) to sys.path_hooks
+  and you're in business.  Instances satisfy both the 'importer' and
+  'loader' APIs specified in PEP 302.
+  """
+
+  def __init__(self, path_entry):
+    """Constructor.
+
+    Args:
+      path_entry: The entry in sys.path.  This should be the name of an
+        existing zipfile possibly with a path separator and a prefix
+        path within the archive appended, e.g. /x/django.zip or
+        /x/django.zip/foo/bar.
+
+    Raises:
+      ZipImportError if the path_entry does not represent a valid
+      zipfile with optional prefix.
+    """
+    archive = path_entry
+    prefix = ''
+    while not os.path.lexists(archive):
+      head, tail = os.path.split(archive)
+      if head == archive:
+        msg = 'Nothing found for %r' % path_entry
+        raise ZipImportError(msg)
+      archive = head
+      prefix = os.path.join(tail, prefix)
+    if not os.path.isfile(archive):
+      msg = 'Non-file %r found for %r' % (archive, path_entry)
+      raise ZipImportError(msg)
+    self.archive = archive
+    self.prefix = os.path.join(prefix, '')
+    self.zipfile = _zipfile_cache.get(archive)
+    if self.zipfile is None:
+      try:
+        self.zipfile = zipfile.ZipFile(self.archive)
+      except (EnvironmentError, zipfile.BadZipfile), err:
+        msg = 'Can\'t open zipfile %s: %s: %s' % (self.archive,
+                                                  err.__class__.__name__, err)
+        import logging
+        logging.warn(msg)
+        raise ZipImportError(msg)
+      else:
+        _zipfile_cache[archive] = self.zipfile
+        import logging
+        logging.info('zipimporter(%r, %r)', archive, prefix)
+
+  def __repr__(self):
+    """Return a string representation matching zipimport.c."""
+    name = self.archive
+    if self.prefix:
+      name = os.path.join(name, self.prefix)
+    return '<zipimporter object "%s">' % name
+
+  def _get_info(self, fullmodname):
+    """Internal helper for find_module() and load_module().
+
+    Args:
+      fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
+
+    Returns:
+      A tuple (submodname, is_package, relpath) where:
+        submodname: The final component of the module name, e.g. 'mail'.
+        is_package: A bool indicating whether this is a package.
+        relpath: The path to the module's source code within the zipfile.
+
+    Raises:
+      ImportError if the module is not found in the archive.
+    """
+    parts = fullmodname.split('.')
+    submodname = parts[-1]
+    for suffix, is_package in _SEARCH_ORDER:
+      relpath = os.path.join(self.prefix,
+                             submodname + suffix.replace('/', os.sep))
+      try:
+        self.zipfile.getinfo(relpath.replace(os.sep, '/'))
+      except KeyError:
+        pass
+      else:
+        return submodname, is_package, relpath
+    msg = ('Can\'t find module %s in zipfile %s with prefix %r' %
+           (fullmodname, self.archive, self.prefix))
+    raise ZipImportError(msg)
+
+  def _get_source(self, fullmodname):
+    """Internal helper for load_module().
+
+    Args:
+      fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
+
+    Returns:
+      A tuple (submodname, is_package, fullpath, source) where:
+        submodname: The final component of the module name, e.g. 'mail'.
+        is_package: A bool indicating whether this is a package.
+        fullpath: The path to the module's source code including the
+          zipfile's filename.
+        source: The module's source code.
+
+    Raises:
+      ImportError if the module is not found in the archive.
+    """
+    submodname, is_package, relpath = self._get_info(fullmodname)
+    fullpath = '%s%s%s' % (self.archive, os.sep, relpath)
+    source = self.zipfile.read(relpath.replace(os.sep, '/'))
+    source = source.replace('\r\n', '\n')
+    source = source.replace('\r', '\n')
+    return submodname, is_package, fullpath, source
+
+  def find_module(self, fullmodname, path=None):
+    """PEP-302-compliant find_module() method.
+
+    Args:
+      fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
+      path: Optional and ignored; present for API compatibility only.
+
+    Returns:
+      None if the module isn't found in the archive; self if it is found.
+    """
+    try:
+      submodname, is_package, relpath = self._get_info(fullmodname)
+    except ImportError:
+      return None
+    else:
+      return self
+
+  def load_module(self, fullmodname):
+    """PEP-302-compliant load_module() method.
+
+    Args:
+      fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
+
+    Returns:
+      The module object constructed from the source code.
+
+    Raises:
+      SyntaxError if the module's source code is syntactically incorrect.
+      ImportError if there was a problem accessing the source code.
+      Whatever else can be raised by executing the module's source code.
+    """
+    submodname, is_package, fullpath, source = self._get_source(fullmodname)
+    code = compile(source, fullpath, 'exec')
+    mod = sys.modules.get(fullmodname)
+    try:
+      if mod is None:
+        mod = sys.modules[fullmodname] = types.ModuleType(fullmodname)
+      mod.__loader__ = self
+      mod.__file__ = fullpath
+      mod.__name__ = fullmodname
+      if is_package:
+        mod.__path__ = [os.path.dirname(mod.__file__)]
+      exec code in mod.__dict__
+    except:
+      if fullmodname in sys.modules:
+        del sys.modules[fullmodname]
+      raise
+    return mod
+
+
+  def get_data(self, fullpath):
+    """Return (binary) content of a data file in the zipfile."""
+    required_prefix = os.path.join(self.archive, '')
+    if not fullpath.startswith(required_prefix):
+      raise IOError('Path %r doesn\'t start with zipfile name %r' %
+                    (fullpath, required_prefix))
+    relpath = fullpath[len(required_prefix):]
+    try:
+      return self.zipfile.read(relpath)
+    except KeyError:
+      raise IOError('Path %r not found in zipfile %r' %
+                    (relpath, self.archive))
+
+  def is_package(self, fullmodname):
+    """Return whether a module is a package."""
+    submodname, is_package, relpath = self._get_info(fullmodname)
+    return is_package
+
+  def get_code(self, fullmodname):
+    """Return bytecode for a module."""
+    submodname, is_package, fullpath, source = self._get_source(fullmodname)
+    return compile(source, fullpath, 'exec')
+
+  def get_source(self, fullmodname):
+    """Return source code for a module."""
+    submodname, is_package, fullpath, source = self._get_source(fullmodname)
+    return source
+
+
+class ZipFileCache(UserDict.DictMixin):
+  """Helper class to export archive data in _zip_directory_cache.
+
+  Just take the info from _zipfile_cache and convert it as required.
+  """
+
+  def __init__(self, archive):
+    _zipfile_cache[archive]
+
+    self._archive = archive
+
+  def keys(self):
+    return _zipfile_cache[self._archive].namelist()
+
+  def __getitem__(self, filename):
+    info = _zipfile_cache[self._archive].getinfo(filename)
+    dt = info.date_time
+    dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
+    dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
+    return (os.path.join(self._archive, info.filename), info.compress_type,
+            info.compress_size, info.file_size, info.header_offset, dostime,
+            dosdate, info.CRC)
+
+
+class ZipDirectoryCache(UserDict.DictMixin):
+  """Helper class to export _zip_directory_cache."""
+
+  def keys(self):
+    return _zipfile_cache.keys()
+
+  def __getitem__(self, archive):
+    return ZipFileCache(archive)
+
+
+_zip_directory_cache = ZipDirectoryCache()
+
+
+sys.modules['zipimport'] = sys.modules[__name__]
+sys.path_hooks[:] = [zipimporter]
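
Once the pure-Python importer above is installed (importing the module is enough outside of production, where it is installed automatically), zip archives on sys.path are handled transparently. A minimal sketch, using a hypothetical /tmp/django.zip archive that contains django/__init__.py:

import sys
from google.appengine.dist import py_zipimport   # installs the path hook as a side effect

sys.path.insert(0, '/tmp/django.zip')             # hypothetical archive
import django                                     # resolved via the zipimporter path hook

# The importer can also be driven directly:
importer = py_zipimport.zipimporter('/tmp/django.zip')
print importer.is_package('django')               # True for django/__init__.py
print importer.find_module('django') is importer  # find_module returns self on success
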
--- a/thirdparty/google_appengine/google/appengine/ext/admin/__init__.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/admin/__init__.py	Fri Apr 24 14:16:00 2009 +0000
@@ -224,7 +224,7 @@
     cron_info = _ParseCronYaml()
     values['cronjobs'] = []
     values['now'] = str(now)
-    if cron_info:
+    if cron_info and cron_info.cron:
       for entry in cron_info.cron:
         job = {}
         values['cronjobs'].append(job)
--- a/thirdparty/google_appengine/google/appengine/ext/gql/__init__.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/gql/__init__.py	Fri Apr 24 14:16:00 2009 +0000
@@ -327,11 +327,11 @@
     """Cast input values to Key() class using encoded string or tuple list."""
     if not len(values) % 2:
       return datastore_types.Key.from_path(_app=self.__app, *values)
-    elif len(values) == 1 and isinstance(values[0], str):
+    elif len(values) == 1 and isinstance(values[0], basestring):
       return datastore_types.Key(values[0])
     else:
       self.__CastError('KEY', values,
-                       'requires an even number of operands'
+                       'requires an even number of operands '
                        'or a single encoded string')
 
   def __CastGeoPt(self, values):
@@ -530,7 +530,7 @@
         raise datastore_errors.BadArgumentError(
             'Missing argument for bind, requires argument #%i, '
             'but only has %i args.' % (reference, num_args))
-    elif isinstance(reference, str):
+    elif isinstance(reference, basestring):
       if reference in keyword_args:
         return keyword_args[reference]
       else:
@@ -919,7 +919,7 @@
       assert condition.lower() == 'is'
 
     if condition.lower() != 'in' and operator == 'list':
-      sef.__Error('Only IN can process a list of values')
+      self.__Error('Only IN can process a list of values')
 
     self.__filters.setdefault(filter_rule, []).append((operator, parameters))
     return True
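
The GQL changes above make the KEY() cast and bind references accept unicode as well as str, and fix the sef/self typo in the IN-list check. A small sketch of the two KEY() forms the cast supports (the kind names, ids and queries are made up):

from google.appengine.ext import db

# Even number of operands: KEY(kind, id_or_name, ...)
q1 = db.GqlQuery("SELECT * FROM Child WHERE ANCESTOR IS KEY('Parent', 1)")

# A single encoded key string; after this change a unicode string works too.
encoded = unicode(db.Key.from_path('Parent', 1))
q2 = db.GqlQuery("SELECT * FROM Child WHERE ANCESTOR IS KEY(:1)", encoded)
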
--- a/thirdparty/google_appengine/google/appengine/ext/remote_api/handler.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/remote_api/handler.py	Fri Apr 24 14:16:00 2009 +0000
@@ -38,9 +38,13 @@
 
 
 import google
+import logging
+import os
 import pickle
 import sha
 import wsgiref.handlers
+import yaml
+
 from google.appengine.api import api_base_pb
 from google.appengine.api import apiproxy_stub
 from google.appengine.api import apiproxy_stub_map
@@ -192,8 +196,13 @@
     if not self.CheckIsAdmin():
       return
 
-    page = self.InfoPage()
-    self.response.out.write(page)
+    rtok = self.request.get('rtok', '0')
+    app_info = {
+        'app_id': os.environ['APPLICATION_ID'],
+        'rtok': rtok
+        }
+
+    self.response.out.write(yaml.dump(app_info))
 
   def post(self):
     """Handle POST requests by executing the API call."""
@@ -209,6 +218,7 @@
       response.mutable_response().set_contents(response_data.Encode())
       self.response.set_status(200)
     except Exception, e:
+      logging.exception('Exception while handling %s', request)
       self.response.set_status(200)
       response.mutable_exception().set_contents(pickle.dumps(e))
     self.response.out.write(response.Encode())
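
With this change a GET against the remote_api handler no longer renders an info page; it echoes a small YAML mapping with the application id and the caller-supplied rtok token. The client-side counterpart of this handshake appears in remote_api_stub.py below. Roughly, the response body is produced like this (values are placeholders):

import yaml
print yaml.dump({'app_id': 'myapp', 'rtok': '17'})
# With PyYAML's default flow style this typically renders as something like:
#   {app_id: myapp, rtok: '17'}
# which is why the client checks that the response starts with '{'.
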
--- a/thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_stub.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/remote_api/remote_api_stub.py	Fri Apr 24 14:16:00 2009 +0000
@@ -61,12 +61,16 @@
 
 
 
+import google
 import os
 import pickle
+import random
 import sha
 import sys
 import thread
 import threading
+import yaml
+
 from google.appengine.api import apiproxy_stub_map
 from google.appengine.datastore import datastore_pb
 from google.appengine.ext.remote_api import remote_api_pb
@@ -74,6 +78,14 @@
 from google.appengine.tools import appengine_rpc
 
 
+class Error(Exception):
+  """Base class for exceptions in this module."""
+
+
+class ConfigurationError(Error):
+  """Exception for configuration errors."""
+
+
 def GetUserAgent():
   """Determines the value of the 'User-agent' header to use for HTTP requests.
 
@@ -378,9 +390,15 @@
                              path,
                              auth_func,
                              servername=None,
-                             rpc_server_factory=appengine_rpc.HttpRpcServer):
+                             rpc_server_factory=appengine_rpc.HttpRpcServer,
+                             rtok=None,
+                             secure=False):
   """Does necessary setup to allow easy remote access to an AppEngine datastore.
 
+  Either servername must be provided or app_id must not be None.  If app_id
+  is None and a servername is provided, this function will send a request
+  to the server to retrieve the app_id.
+
   Args:
     app_id: The app_id of your app, as declared in app.yaml.
     path: The path to the remote_api handler for your app
@@ -392,12 +410,38 @@
     servername: The hostname your app is deployed on. Defaults to
       <app_id>.appspot.com.
     rpc_server_factory: A factory to construct the rpc server for the datastore.
+    rtok: The validation token to send with app_id lookups. If None, a random
+      token is used.
+    secure: Use SSL when communicating with the server.
+
+  Raises:
+    urllib2.HTTPError: if app_id is not provided and there is an error while
+      retrieving it.
+    ConfigurationError: if there is an error configuring the remote datastore stub.
   """
+  if not servername and not app_id:
+    raise ConfigurationError('app_id or servername required')
   if not servername:
     servername = '%s.appspot.com' % (app_id,)
+  server = rpc_server_factory(servername, auth_func, GetUserAgent(),
+                              GetSourceName(), debug_data=False, secure=secure)
+  if not app_id:
+    if not rtok:
+      random.seed()
+      rtok = str(random.randint(0, 2000000000))
+    urlargs = {'rtok': rtok}
+    response = server.Send(path, payload=None, **urlargs)
+    if not response.startswith('{'):
+      raise ConfigurationError(
+          'Invalid response received from server: %s' % response)
+    app_info = yaml.load(response)
+    if not app_info or 'rtok' not in app_info or 'app_id' not in app_info:
+      raise ConfigurationError('Error parsing app_id lookup response')
+    if app_info['rtok'] != rtok:
+      raise ConfigurationError('Token validation failed during app_id lookup.')
+    app_id = app_info['app_id']
+
   os.environ['APPLICATION_ID'] = app_id
   apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
-  server = rpc_server_factory(servername, auth_func, GetUserAgent(),
-                                       GetSourceName())
   stub = RemoteDatastoreStub(server, path)
   apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', stub)
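
ConfigureRemoteDatastore() can now be called without an app_id: given only a servername, it asks the handler above for the id and checks the echoed rtok before registering the stub. A minimal sketch with hypothetical hostname, path and credentials:

from google.appengine.ext.remote_api import remote_api_stub

def auth_func():
    return ('user@example.com', 'password')   # placeholder credentials

remote_api_stub.ConfigureRemoteDatastore(
    None,                          # app_id: None triggers the lookup round trip
    '/remote_api',                 # path the handler is mapped to (assumption)
    auth_func,
    servername='myapp.appspot.com',
    secure=True)                   # new flag: use SSL for the connection
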
--- a/thirdparty/google_appengine/google/appengine/ext/webapp/__init__.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/ext/webapp/__init__.py	Fri Apr 24 14:16:00 2009 +0000
@@ -96,6 +96,9 @@
   You can access parsed query and POST values with the get() method; do not
   parse the query string yourself.
   """
+
+  request_body_tempfile_limit = 0
+
   uri = property(lambda self: self.url)
   query = property(lambda self: self.query_string)
 
--- a/thirdparty/google_appengine/google/appengine/tools/appcfg.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/appcfg.py	Fri Apr 24 14:16:00 2009 +0000
@@ -53,6 +53,7 @@
 from google.appengine.api import yaml_object
 from google.appengine.datastore import datastore_index
 from google.appengine.tools import appengine_rpc
+from google.appengine.tools import bulkloader
 
 
 MAX_FILES_TO_CLONE = 100
@@ -1156,6 +1157,7 @@
     StatusUpdate("Closing update: new version is ready to start serving.")
     self.server.Send("/api/appversion/startserving",
                      app_id=self.app_id, version=self.version)
+    self.in_transaction = False
 
   def Rollback(self):
     """Rolls back the transaction if one is in progress."""
@@ -1470,6 +1472,9 @@
     parser.add_option("-s", "--server", action="store", dest="server",
                       default="appengine.google.com",
                       metavar="SERVER", help="The server to connect to.")
+    parser.add_option("--secure", action="store_true", dest="secure",
+                      default=False,
+                      help="Use SSL when communicating with the server.")
     parser.add_option("-e", "--email", action="store", dest="email",
                       metavar="EMAIL", default=None,
                       help="The username to use. Will prompt if omitted.")
@@ -1557,7 +1562,8 @@
                                  host_override=self.options.host,
                                  save_cookies=self.options.save_cookies,
                                  auth_tries=auth_tries,
-                                 account_type="HOSTED_OR_GOOGLE")
+                                 account_type="HOSTED_OR_GOOGLE",
+                                 secure=self.options.secure)
 
   def _FindYaml(self, basepath, file_name):
     """Find yaml files in application directory.
@@ -1822,14 +1828,14 @@
 
     basepath = self.args[0]
     cron_entries = self._ParseCronYaml(basepath)
-    if cron_entries:
+    if cron_entries and cron_entries.cron:
       for entry in cron_entries.cron:
         description = entry.description
         if not description:
           description = "<no description>"
         print >>output, "\n%s:\nURL: %s\nSchedule: %s" % (description,
-                                                          entry.url,
-                                                          entry.schedule)
+                                                          entry.schedule,
+                                                          entry.url)
         schedule = groctimespecification.GrocTimeSpecification(entry.schedule)
         matches = schedule.GetMatches(now, self.options.num_runs)
         for match in matches:
@@ -1847,6 +1853,164 @@
                       help="Number of runs of each cron job to display"
                       "Default is 5")
 
+  def _CheckRequiredUploadOptions(self):
+    """Checks that upload options are present."""
+    for option in ["filename", "kind", "config_file"]:
+      if getattr(self.options, option) is None:
+        self.parser.error("Option '%s' is required." % option)
+    if not self.options.url:
+      self.parser.error("You must have google.appengine.ext.remote_api.handler "
+                        "assigned to an endpoint in app.yaml, or provide "
+                        "the url of the handler via the 'url' option.")
+
+  def InferUploadUrl(self, appyaml):
+    """Uses app.yaml to determine the remote_api endpoint.
+
+    Args:
+      appyaml: A parsed app.yaml file.
+
+    Returns:
+      The url of the remote_api endpoint as a string, or None
+    """
+    handlers = appyaml.handlers
+    handler_suffix = "remote_api/handler.py"
+    app_id = appyaml.application
+    for handler in handlers:
+      if hasattr(handler, "script") and handler.script:
+        if handler.script.endswith(handler_suffix):
+          server = self.options.server
+          if server == "appengine.google.com":
+            return "http://%s.appspot.com%s" % (app_id, handler.url)
+          else:
+            return "http://%s%s" % (server, handler.url)
+    return None
+
+  def RunBulkloader(self, **kwargs):
+    """Invokes the bulkloader with the given keyword arguments.
+
+    Args:
+      kwargs: Keyword arguments to pass to bulkloader.Run().
+    """
+    try:
+      import sqlite3
+    except ImportError:
+      logging.error("upload_data action requires SQLite3 and the python "
+                    "sqlite3 module (included in python since 2.5).")
+      sys.exit(1)
+
+    sys.exit(bulkloader.Run(kwargs))
+
+  def PerformUpload(self, run_fn=None):
+    """Performs a datastore upload via the bulkloader.
+
+    Args:
+      run_fn: Function to invoke the bulkloader, used for testing.
+    """
+    if run_fn is None:
+      run_fn = self.RunBulkloader
+
+    if len(self.args) != 1:
+      self.parser.error("Expected <directory> argument.")
+
+    basepath = self.args[0]
+    appyaml = self._ParseAppYaml(basepath)
+
+    self.options.app_id = appyaml.application
+
+    if not self.options.url:
+      url = self.InferUploadUrl(appyaml)
+      if url is not None:
+        self.options.url = url
+
+    self._CheckRequiredUploadOptions()
+
+    if self.options.batch_size < 1:
+      self.parser.error("batch_size must be 1 or larger.")
+
+    if verbosity == 1:
+      logging.getLogger().setLevel(logging.INFO)
+      self.options.debug = False
+    else:
+      logging.getLogger().setLevel(logging.DEBUG)
+      self.options.debug = True
+
+    StatusUpdate("Uploading data records.")
+
+    run_fn(app_id=self.options.app_id,
+           url=self.options.url,
+           filename=self.options.filename,
+           batch_size=self.options.batch_size,
+           kind=self.options.kind,
+           num_threads=self.options.num_threads,
+           bandwidth_limit=self.options.bandwidth_limit,
+           rps_limit=self.options.rps_limit,
+           http_limit=self.options.http_limit,
+           db_filename=self.options.db_filename,
+           config_file=self.options.config_file,
+           auth_domain=self.options.auth_domain,
+           has_header=self.options.has_header,
+           loader_opts=self.options.loader_opts,
+           log_file=self.options.log_file,
+           passin=self.options.passin,
+           email=self.options.email,
+           debug=self.options.debug,
+
+           exporter_opts=None,
+           download=False,
+           result_db_filename=None,
+           )
+
+  def _PerformUploadOptions(self, parser):
+    """Adds 'upload_data' specific options to the 'parser' passed in.
+
+    Args:
+      parser: An instance of OptionsParser.
+    """
+    parser.add_option("--filename", type="string", dest="filename",
+                      action="store",
+                      help="The name of the file containing the input data."
+                      " (Required)")
+    parser.add_option("--config_file", type="string", dest="config_file",
+                      action="store",
+                      help="Name of the configuration file. (Required)")
+    parser.add_option("--kind", type="string", dest="kind",
+                      action="store",
+                      help="The kind of the entities to store. (Required)")
+    parser.add_option("--url", type="string", dest="url",
+                      action="store",
+                      help="The location of the remote_api endpoint.")
+    parser.add_option("--num_threads", type="int", dest="num_threads",
+                      action="store", default=10,
+                      help="Number of threads to upload records with.")
+    parser.add_option("--batch_size", type="int", dest="batch_size",
+                      action="store", default=10,
+                      help="Number of records to post in each request.")
+    parser.add_option("--bandwidth_limit", type="int", dest="bandwidth_limit",
+                      action="store", default=250000,
+                      help="The maximum bytes/second bandwidth for transfers.")
+    parser.add_option("--rps_limit", type="int", dest="rps_limit",
+                      action="store", default=20,
+                      help="The maximum records/second for transfers.")
+    parser.add_option("--http_limit", type="int", dest="http_limit",
+                      action="store", default=8,
+                      help="The maximum requests/second for transfers.")
+    parser.add_option("--db_filename", type="string", dest="db_filename",
+                      action="store",
+                      help="Name of the progress database file.")
+    parser.add_option("--auth_domain", type="string", dest="auth_domain",
+                      action="store", default="gmail.com",
+                      help="The name of the authorization domain to use.")
+    parser.add_option("--has_header", dest="has_header",
+                      action="store_true", default=False,
+                      help="Whether the first line of the input file should be"
+                      " skipped")
+    parser.add_option("--loader_opts", type="string", dest="loader_opts",
+                      help="A string to pass to the Loader.Initialize method.")
+    parser.add_option("--log_file", type="string", dest="log_file",
+                      help="File to write bulkloader logs.  If not supplied "
+                           "then a new log file will be created, named: "
+                           "bulkloader-log-TIMESTAMP.")
+
   class Action(object):
     """Contains information about a command line action.
 
@@ -1953,6 +2117,15 @@
 The 'cron_info' command will display the next 'number' runs (default 5) for
 each cron job defined in the cron.yaml file."""),
 
+      "upload_data": Action(
+          function="PerformUpload",
+          usage="%prog [options] upload_data <directory>",
+          options=_PerformUploadOptions,
+          short_desc="Upload CSV records to datastore",
+          long_desc="""
+The 'upload_data' command translates CSV records into datastore entities and
+uploads them into your application's datastore."""),
+
 
 
   }
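
The new upload_data action is mostly glue: it infers the remote_api URL from app.yaml, validates the required options, and forwards everything to bulkloader.Run() as a single dict. A sketch of the equivalent direct call, with placeholder values for every option:

from google.appengine.tools import bulkloader

exit_code = bulkloader.Run(dict(
    app_id='myapp',
    url='http://myapp.appspot.com/remote_api',
    filename='data.csv',
    kind='Entry',
    config_file='loader.py',
    batch_size=10,
    num_threads=10,
    bandwidth_limit=250000,
    rps_limit=20,
    http_limit=8,
    db_filename=None,
    auth_domain='gmail.com',
    has_header=True,
    loader_opts=None,
    log_file=None,
    passin=False,
    email=None,
    debug=False,
    exporter_opts=None,
    download=False,
    result_db_filename=None,
))
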
--- a/thirdparty/google_appengine/google/appengine/tools/appengine_rpc.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/appengine_rpc.py	Fri Apr 24 14:16:00 2009 +0000
@@ -41,6 +41,7 @@
 except ImportError:
   pass
 
+logger = logging.getLogger('google.appengine.tools.appengine_rpc')
 
 def GetPlatformToken(os_module=os, sys_module=sys, platform=sys.platform):
   """Returns a 'User-agent' token for the host system platform.
@@ -61,6 +62,33 @@
   else:
     return "unknown"
 
+def HttpRequestToString(req, include_data=True):
+  """Converts a urllib2.Request to a string.
+
+  Args:
+    req: urllib2.Request
+  Returns:
+    Multi-line string representing the request.
+  """
+
+  headers = ""
+  for header in req.header_items():
+    headers += "%s: %s\n" % (header[0], header[1])
+
+  template = ("%(method)s %(selector)s %(type)s/1.1\n"
+              "Host: %(host)s\n"
+              "%(headers)s")
+  if include_data:
+    template = template + "\n%(data)s"
+
+  return template % {
+      'method' : req.get_method(),
+      'selector' : req.get_selector(),
+      'type' : req.get_type().upper(),
+      'host' : req.get_host(),
+      'headers': headers,
+      'data': req.get_data(),
+      }
 
 class ClientLoginError(urllib2.HTTPError):
   """Raised to indicate there was an error authenticating with ClientLogin."""
@@ -70,13 +98,16 @@
     self.args = args
     self.reason = args["Error"]
 
+  def read(self):
+    return '%d %s: %s' % (self.code, self.msg, self.reason)
+
 
 class AbstractRpcServer(object):
   """Provides a common interface for a simple RPC server."""
 
   def __init__(self, host, auth_function, user_agent, source,
                host_override=None, extra_headers=None, save_cookies=False,
-               auth_tries=3, account_type=None):
+               auth_tries=3, account_type=None, debug_data=True, secure=False):
     """Creates a new HttpRpcServer.
 
     Args:
@@ -95,13 +126,19 @@
         implement this functionality.  Defaults to False.
       auth_tries: The number of times to attempt auth_function before failing.
       account_type: One of GOOGLE, HOSTED_OR_GOOGLE, or None for automatic.
+      debug_data: Whether debugging output should include data contents.
     """
+    if secure:
+      self.scheme = "https"
+    else:
+      self.scheme = "http"
     self.host = host
     self.host_override = host_override
     self.auth_function = auth_function
     self.source = source
     self.authenticated = False
     self.auth_tries = auth_tries
+    self.debug_data = debug_data
 
     self.account_type = account_type
 
@@ -115,9 +152,9 @@
     self.cookie_jar = cookielib.MozillaCookieJar()
     self.opener = self._GetOpener()
     if self.host_override:
-      logging.info("Server: %s; Host: %s", self.host, self.host_override)
+      logger.info("Server: %s; Host: %s", self.host, self.host_override)
     else:
-      logging.info("Server: %s", self.host)
+      logger.info("Server: %s", self.host)
 
     if ((self.host_override and self.host_override == "localhost") or
         self.host == "localhost" or self.host.startswith("localhost:")):
@@ -200,8 +237,9 @@
     continue_location = "http://localhost/"
     args = {"continue": continue_location, "auth": auth_token}
     login_path = os.environ.get("APPCFG_LOGIN_PATH", "/_ah")
-    req = self._CreateRequest("http://%s%s/login?%s" %
-                              (self.host, login_path, urllib.urlencode(args)))
+    req = self._CreateRequest("%s://%s%s/login?%s" %
+                              (self.scheme, self.host, login_path,
+                               urllib.urlencode(args)))
     try:
       response = self.opener.open(req)
     except urllib2.HTTPError, e:
@@ -291,30 +329,39 @@
     socket.setdefaulttimeout(timeout)
     try:
       tries = 0
+      auth_tried = False
       while True:
         tries += 1
         args = dict(kwargs)
-        url = "http://%s%s?%s" % (self.host, request_path,
-                                  urllib.urlencode(args))
+        url = "%s://%s%s?%s" % (self.scheme, self.host, request_path,
+                                urllib.urlencode(args))
         req = self._CreateRequest(url=url, data=payload)
         req.add_header("Content-Type", content_type)
         req.add_header("X-appcfg-api-version", "1")
         try:
+          logger.debug('Sending HTTP request:\n%s' %
+                       HttpRequestToString(req, include_data=self.debug_data))
           f = self.opener.open(req)
           response = f.read()
           f.close()
           return response
         except urllib2.HTTPError, e:
-          logging.debug("Got http error, this is try #%s" % tries)
+          logger.debug("Got http error, this is try #%s" % tries)
           if tries > self.auth_tries:
             raise
           elif e.code == 401:
+            if auth_tried:
+              raise
+            auth_tried = True
             self._Authenticate()
           elif e.code >= 500 and e.code < 600:
             continue
           elif e.code == 302:
+            if auth_tried:
+              raise
+            auth_tried = True
             loc = e.info()["location"]
-            logging.debug("Got 302 redirect. Location: %s" % loc)
+            logger.debug("Got 302 redirect. Location: %s" % loc)
             if loc.startswith("https://www.google.com/accounts/ServiceLogin"):
               self._Authenticate()
             elif re.match(r"https://www.google.com/a/[a-z0-9.-]+/ServiceLogin",
@@ -337,14 +384,14 @@
   def _Authenticate(self):
     """Save the cookie jar after authentication."""
     if cert_file_available and not uses_cert_verification:
-      logging.warn("ssl module not found. Without this the identity of the "
-                   "remote host cannot be verified, and connections are NOT "
-                   "secure. To fix this, please install the ssl module from "
-                   "http://pypi.python.org/pypi/ssl")
+      logger.warn("ssl module not found. Without this the identity of the "
+                  "remote host cannot be verified, and connections are NOT "
+                  "secure. To fix this, please install the ssl module from "
+                  "http://pypi.python.org/pypi/ssl")
     super(HttpRpcServer, self)._Authenticate()
     if self.cookie_jar.filename is not None and self.save_cookies:
-      logging.info("Saving authentication cookies to %s" %
-                   self.cookie_jar.filename)
+      logger.info("Saving authentication cookies to %s" %
+                  self.cookie_jar.filename)
       self.cookie_jar.save()
 
   def _GetOpener(self):
@@ -369,19 +416,19 @@
         try:
           self.cookie_jar.load()
           self.authenticated = True
-          logging.info("Loaded authentication cookies from %s" %
-                       self.cookie_jar.filename)
+          logger.info("Loaded authentication cookies from %s" %
+                      self.cookie_jar.filename)
         except (OSError, IOError, cookielib.LoadError), e:
-          logging.debug("Could not load authentication cookies; %s: %s",
-                        e.__class__.__name__, e)
+          logger.debug("Could not load authentication cookies; %s: %s",
+                       e.__class__.__name__, e)
           self.cookie_jar.filename = None
       else:
         try:
           fd = os.open(self.cookie_jar.filename, os.O_CREAT, 0600)
           os.close(fd)
         except (OSError, IOError), e:
-          logging.debug("Could not create authentication cookies file; %s: %s",
-                        e.__class__.__name__, e)
+          logger.debug("Could not create authentication cookies file; %s: %s",
+                       e.__class__.__name__, e)
           self.cookie_jar.filename = None
 
     opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
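
appengine_rpc.py gains a module-level logger, a scheme switch and two new constructor flags. A minimal sketch of building an RPC server with the new keywords (host, credentials and identifier strings are placeholders); secure=True makes every request URL https, and debug_data=False keeps payload bodies out of the debug log:

from google.appengine.tools import appengine_rpc

def auth_func():
    return ('user@example.com', 'password')   # placeholder credentials

server = appengine_rpc.HttpRpcServer(
    'myapp.appspot.com',
    auth_func,
    'my-user-agent',                # placeholder user-agent token
    'my-source',                    # placeholder source identifier
    save_cookies=True,
    account_type='HOSTED_OR_GOOGLE',
    secure=True,
    debug_data=False)
response = server.Send('/remote_api', payload=None)
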
--- a/thirdparty/google_appengine/google/appengine/tools/bulkloader.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/bulkloader.py	Fri Apr 24 14:16:00 2009 +0000
@@ -15,7 +15,7 @@
 # limitations under the License.
 #
 
-"""Imports CSV data over HTTP.
+"""Imports data over HTTP.
 
 Usage:
   %(arg0)s [flags]
@@ -27,6 +27,8 @@
                             UserProperties. (Default: gmail.com)
     --bandwidth_limit=<int> The maximum number of bytes per second for the
                             aggregate transfer of data to the server. Bursts
+                            may exceed this, but overall transfer rate is
+                            restricted to this rate. (Default 250000)
     --batch_size=<int>      Number of Entity objects to include in each post to
                             the URL endpoint. The more data per row/Entity, the
                             smaller the batch size should be. (Default 10)
@@ -38,15 +40,25 @@
                             bulkloader-progress-TIMESTAMP.
                             The special filename "skip" may be used to simply
                             skip reading/writing any progress information.
-    --filename=<path>       Path to the CSV file to import. (Required)
+    --download              Export entities to a file.
+    --email=<string>        The username to use. Will prompt if omitted.
+    --exporter_opts=<string>
+                            A string to pass to the Exporter.initialize method.
+    --filename=<path>       Path to the file to import. (Required)
+    --has_header            Skip the first row of the input.
     --http_limit=<int>      The maximum number of HTTP requests per second to
                             send to the server. (Default: 8)
     --kind=<string>         Name of the Entity object kind to put in the
                             datastore. (Required)
+    --loader_opts=<string>  A string to pass to the Loader.initialize method.
+    --log_file=<path>       File to write bulkloader logs.  If not supplied
+                            then a new log file will be created, named:
+                            bulkloader-log-TIMESTAMP.
     --num_threads=<int>     Number of threads to use for uploading entities
                             (Default 10)
-                            may exceed this, but overall transfer rate is
-                            restricted to this rate. (Default 250000)
+    --passin                Read the login password from stdin.
+    --result_db_filename=<path>
+                            Result database to write to for downloads.
     --rps_limit=<int>       The maximum number of records per second to
                             transfer to the server. (Default: 20)
     --url=<string>          URL endpoint to post to for importing data.
@@ -66,23 +78,29 @@
 
 
 
+import cPickle
 import csv
+import errno
 import getopt
 import getpass
+import imp
 import logging
-import new
 import os
 import Queue
+import re
 import signal
+import StringIO
 import sys
 import threading
 import time
-import traceback
 import urllib2
 import urlparse
 
+from google.appengine.api import datastore_errors
 from google.appengine.ext import db
+from google.appengine.ext.db import polymodel
 from google.appengine.ext.remote_api import remote_api_stub
+from google.appengine.runtime import apiproxy_errors
 from google.appengine.tools import appengine_rpc
 
 try:
@@ -90,7 +108,7 @@
 except ImportError:
   pass
 
-UPLOADER_VERSION = '1'
+logger = logging.getLogger('google.appengine.tools.bulkloader')
 
 DEFAULT_THREAD_COUNT = 10
 
@@ -105,6 +123,10 @@
 STATE_SENT = 2
 STATE_NOT_SENT = 3
 
+STATE_GETTING = 1
+STATE_GOT = 2
+STATE_NOT_GOT = 3
+
 MINIMUM_THROTTLE_SLEEP_DURATION = 0.001
 
 DATA_CONSUMED_TO_HERE = 'DATA_CONSUMED_TO_HERE'
@@ -128,8 +150,11 @@
 HTTPS_REQUESTS = 'https-requests'
 RECORDS = 'records'
 
-
-def StateMessage(state):
+MAXIMUM_INCREASE_DURATION = 8.0
+MAXIMUM_HOLD_DURATION = 10.0
+
+
+def ImportStateMessage(state):
   """Converts a numeric state identifier to a status message."""
   return ({
       STATE_READ: 'Batch read from file.',
@@ -139,12 +164,46 @@
   }[state])
 
 
+def ExportStateMessage(state):
+  """Converts a numeric state identifier to a status message."""
+  return ({
+      STATE_READ: 'Batch read from file.',
+      STATE_GETTING: 'Fetching batch from server',
+      STATE_GOT: 'Batch successfully fetched.',
+      STATE_NOT_GOT: 'Error while fetching batch'
+  }[state])
+
+
+def ExportStateName(state):
+  """Converts a numeric state identifier to a string."""
+  return ({
+      STATE_READ: 'READ',
+      STATE_GETTING: 'GETTING',
+      STATE_GOT: 'GOT',
+      STATE_NOT_GOT: 'NOT_GOT'
+  }[state])
+
+
+def ImportStateName(state):
+  """Converts a numeric state identifier to a string."""
+  return ({
+      STATE_READ: 'READ',
+      STATE_GETTING: 'SENDING',
+      STATE_GOT: 'SENT',
+      STATE_NOT_GOT: 'NOT_SENT'
+  }[state])
+
+
 class Error(Exception):
   """Base-class for exceptions in this module."""
 
 
+class MissingPropertyError(Error):
+  """An expected field is missing from an entity, and no default was given."""
+
+
 class FatalServerError(Error):
-  """An unrecoverable error occurred while trying to post data to the server."""
+  """An unrecoverable error occurred while posting data to the server."""
 
 
 class ResumeError(Error):
@@ -159,72 +218,107 @@
   """Error while trying to authenticate with the server."""
 
 
-def GetCSVGeneratorFactory(csv_filename, batch_size,
+class FileNotFoundError(Error):
+  """A filename passed in by the user refers to a non-existent input file."""
+
+
+class FileNotReadableError(Error):
+  """A filename passed in by the user refers to a non-readable input file."""
+
+
+class FileExistsError(Error):
+  """A filename passed in by the user refers to an existing output file."""
+
+
+class FileNotWritableError(Error):
+  """A filename passed in by the user refers to a non-writable output file."""
+
+
+class KeyRangeError(Error):
+  """Error while trying to generate a KeyRange."""
+
+
+class BadStateError(Error):
+  """A work item in an unexpected state was encountered."""
+
+
+class NameClashError(Error):
+  """A name clash occurred while trying to alias old method names."""
+  def __init__(self, old_name, new_name, klass):
+    Error.__init__(self, old_name, new_name, klass)
+    self.old_name = old_name
+    self.new_name = new_name
+    self.klass = klass
+
+
+def GetCSVGeneratorFactory(kind, csv_filename, batch_size, csv_has_header,
                            openfile=open, create_csv_reader=csv.reader):
   """Return a factory that creates a CSV-based WorkItem generator.
 
   Args:
+    kind: The kind of the entities being uploaded.
     csv_filename: File on disk containing CSV data.
     batch_size: Maximum number of CSV rows to stash into a WorkItem.
+    csv_has_header: Whether to skip the first row of the CSV.
     openfile: Used for dependency injection.
     create_csv_reader: Used for dependency injection.
 
-  Returns: A callable (accepting the Progress Queue and Progress
-    Generators as input) which creates the WorkItem generator.
+  Returns:
+    A callable (accepting the Progress Queue and Progress Generators
+    as input) which creates the WorkItem generator.
   """
+  loader = Loader.RegisteredLoader(kind)
+  loader._Loader__openfile = openfile
+  loader._Loader__create_csv_reader = create_csv_reader
+  record_generator = loader.generate_records(csv_filename)
 
   def CreateGenerator(progress_queue, progress_generator):
-    """Initialize a CSV generator linked to a progress generator and queue.
+    """Initialize a WorkItem generator linked to a progress generator and queue.
 
     Args:
       progress_queue: A ProgressQueue instance to send progress information.
       progress_generator: A generator of progress information or None.
 
     Returns:
-      A CSVGenerator instance.
+      A WorkItemGenerator instance.
     """
-    return CSVGenerator(progress_queue,
-                        progress_generator,
-                        csv_filename,
-                        batch_size,
-                        openfile,
-                        create_csv_reader)
+    return WorkItemGenerator(progress_queue,
+                             progress_generator,
+                             record_generator,
+                             csv_has_header,
+                             batch_size)
+
   return CreateGenerator
 
 
-class CSVGenerator(object):
-  """Reads a CSV file and generates WorkItems containing batches of records."""
+class WorkItemGenerator(object):
+  """Reads rows from a row generator and generates WorkItems of batches."""
 
   def __init__(self,
                progress_queue,
                progress_generator,
-               csv_filename,
-               batch_size,
-               openfile,
-               create_csv_reader):
-    """Initializes a CSV generator.
+               record_generator,
+               skip_first,
+               batch_size):
+    """Initialize a WorkItemGenerator.
 
     Args:
-      progress_queue: A queue used for tracking progress information.
-      progress_generator: A generator of prior progress information, or None
-        if there is no prior status.
-      csv_filename: File on disk containing CSV data.
-      batch_size: Maximum number of CSV rows to stash into a WorkItem.
-      openfile: Used for dependency injection of 'open'.
-      create_csv_reader: Used for dependency injection of 'csv.reader'.
+      progress_queue: A progress queue with which to associate WorkItems.
+      progress_generator: A generator of progress information.
+      record_generator: A generator of data records.
+      skip_first: Whether to skip the first data record.
+      batch_size: The number of data records per WorkItem.
     """
     self.progress_queue = progress_queue
     self.progress_generator = progress_generator
-    self.csv_filename = csv_filename
+    self.reader = record_generator
+    self.skip_first = skip_first
     self.batch_size = batch_size
-    self.openfile = openfile
-    self.create_csv_reader = create_csv_reader
     self.line_number = 1
     self.column_count = None
     self.read_rows = []
-    self.reader = None
     self.row_count = 0
-    self.sent_count = 0
+    self.xfer_count = 0
 
   def _AdvanceTo(self, line):
     """Advance the reader to the given line.
@@ -236,7 +330,7 @@
       self.reader.next()
       self.line_number += 1
       self.row_count += 1
-      self.sent_count += 1
+      self.xfer_count += 1
 
   def _ReadRows(self, key_start, key_end):
     """Attempts to read and encode rows [key_start, key_end].
@@ -286,7 +380,7 @@
     return item
 
   def Batches(self):
-    """Reads the CSV data file and generates WorkItems.
+    """Reads from the record_generator and generates WorkItems.
 
     Yields:
       Instances of class WorkItem
@@ -295,28 +389,23 @@
       ResumeError: If the progress database and data file indicate a different
         number of rows.
     """
-    csv_file = self.openfile(self.csv_filename, 'r')
-    csv_content = csv_file.read()
-    if csv_content:
-      has_headers = csv.Sniffer().has_header(csv_content)
-    else:
-      has_headers = False
-    csv_file.seek(0)
-    self.reader = self.create_csv_reader(csv_file, skipinitialspace=True)
-    if has_headers:
-      logging.info('The CSV file appears to have a header line, skipping.')
-      self.reader.next()
+    if self.skip_first:
+      logger.info('Skipping header line.')
+      try:
+        self.reader.next()
+      except StopIteration:
+        return
 
     exhausted = False
 
     self.line_number = 1
     self.column_count = None
 
-    logging.info('Starting import; maximum %d entities per post',
-                 self.batch_size)
+    logger.info('Starting import; maximum %d entities per post',
+                self.batch_size)
 
     state = None
-    if self.progress_generator is not None:
+    if self.progress_generator:
       for progress_key, state, key_start, key_end in self.progress_generator:
         if key_start:
           try:
@@ -327,7 +416,7 @@
                                  self.read_rows,
                                  progress_key=progress_key)
           except StopIteration:
-            logging.error('Mismatch between data file and progress database')
+            logger.error('Mismatch between data file and progress database')
             raise ResumeError(
                 'Mismatch between data file and progress database')
         elif state == DATA_CONSUMED_TO_HERE:
@@ -349,6 +438,91 @@
           yield self._MakeItem(key_start, key_end, self.read_rows)
 
 
+class CSVGenerator(object):
+  """Reads a CSV file and generates data records."""
+
+  def __init__(self,
+               csv_filename,
+               openfile=open,
+               create_csv_reader=csv.reader):
+    """Initializes a CSV generator.
+
+    Args:
+      csv_filename: File on disk containing CSV data.
+      openfile: Used for dependency injection of 'open'.
+      create_csv_reader: Used for dependency injection of 'csv.reader'.
+    """
+    self.csv_filename = csv_filename
+    self.openfile = openfile
+    self.create_csv_reader = create_csv_reader
+
+  def Records(self):
+    """Reads the CSV data file and generates row records.
+
+    Yields:
+      Lists of strings
+
+    Raises:
+      ResumeError: If the progress database and data file indicate a different
+        number of rows.
+    """
+    csv_file = self.openfile(self.csv_filename, 'rb')
+    reader = self.create_csv_reader(csv_file, skipinitialspace=True)
+    return reader
+
+
+class KeyRangeGenerator(object):
+  """Generates ranges of keys to download.
+
+  Reads progress information from the progress database and creates
+  KeyRange objects corresponding to incompletely downloaded parts of an
+  export.
+  """
+
+  def __init__(self, kind, progress_queue, progress_generator):
+    """Initialize the KeyRangeGenerator.
+
+    Args:
+      kind: The kind of entities being transferred.
+      progress_queue: A queue used for tracking progress information.
+      progress_generator: A generator of prior progress information, or None
+        if there is no prior status.
+    """
+    self.kind = kind
+    self.row_count = 0
+    self.xfer_count = 0
+    self.progress_queue = progress_queue
+    self.progress_generator = progress_generator
+
+  def Batches(self):
+    """Iterate through saved progress information.
+
+    Yields:
+      KeyRange instances corresponding to undownloaded key ranges.
+    """
+    if self.progress_generator is not None:
+      for progress_key, state, key_start, key_end in self.progress_generator:
+        if state is not None and state != STATE_GOT and key_start is not None:
+          key_start = ParseKey(key_start)
+          key_end = ParseKey(key_end)
+
+          result = KeyRange(self.progress_queue,
+                            self.kind,
+                            key_start=key_start,
+                            key_end=key_end,
+                            progress_key=progress_key,
+                            direction=KeyRange.ASC,
+                            state=STATE_READ)
+          yield result
+    else:
+
+      yield KeyRange(
+          self.progress_queue, self.kind,
+          key_start=None,
+          key_end=None,
+          direction=KeyRange.DESC)
+
+
 class ReQueue(object):
   """A special thread-safe queue.
 
@@ -358,7 +532,7 @@
   the number of outstanding tasks.
 
   This class shares an interface with Queue.Queue and provides the
-  additional Reput method.
+  additional reput method.
   """
 
   def __init__(self,
@@ -474,7 +648,7 @@
     Re-putting an item does not increase the number of outstanding
     tasks, so the reput item should be uniquely associated with an
     item that was previously removed from the requeue and for which
-    task_done has not been called.
+    TaskDone has not been called.
 
     Args:
       item: An item to add to the requeue.
@@ -538,6 +712,9 @@
     """Try to get an item from the queue without blocking."""
     return self.get(block=False)
 
+  def qsize(self):
+    return self.queue.qsize() + self.requeue.qsize()
+
 
 class ThrottleHandler(urllib2.BaseHandler):
   """A urllib2 handler for http and https requests that adds to a throttle."""
@@ -701,133 +878,127 @@
   Returns:
     A factory to produce a ThrottledHttpRpcServer.
   """
+
   def MakeRpcServer(*args, **kwargs):
+    """Factory to produce a ThrottledHttpRpcServer.
+
+    Args:
+      args: Positional args to pass to ThrottledHttpRpcServer.
+      kwargs: Keyword args to pass to ThrottledHttpRpcServer.
+
+    Returns:
+      A ThrottledHttpRpcServer instance.
+    """
     kwargs['account_type'] = 'HOSTED_OR_GOOGLE'
     kwargs['save_cookies'] = True
     return ThrottledHttpRpcServer(throttle, request_manager, *args, **kwargs)
   return MakeRpcServer
 
 
-class RequestManager(object):
-  """A class which wraps a connection to the server."""
-
-  source = 'google-bulkloader-%s' % UPLOADER_VERSION
-  user_agent = source
-
-  def __init__(self,
-               app_id,
-               host_port,
-               url_path,
-               kind,
-               throttle):
-    """Initialize a RequestManager object.
-
-    Args:
-      app_id: String containing the application id for requests.
-      host_port: String containing the "host:port" pair; the port is optional.
-      url_path: partial URL (path) to post entity data to.
-      kind: Kind of the Entity records being posted.
-      throttle: A Throttle instance.
-    """
-    self.app_id = app_id
-    self.host_port = host_port
-    self.host = host_port.split(':')[0]
-    if url_path and url_path[0] != '/':
-      url_path = '/' + url_path
-    self.url_path = url_path
-    self.kind = kind
-    self.throttle = throttle
-    self.credentials = None
-    throttled_rpc_server_factory = ThrottledHttpRpcServerFactory(
-        self.throttle, self)
-    logging.debug('Configuring remote_api. app_id = %s, url_path = %s, '
-                  'servername = %s' % (app_id, url_path, host_port))
-    remote_api_stub.ConfigureRemoteDatastore(
-        app_id,
-        url_path,
-        self.AuthFunction,
-        servername=host_port,
-        rpc_server_factory=throttled_rpc_server_factory)
-    self.authenticated = False
-
-  def Authenticate(self):
-    """Invoke authentication if necessary."""
-    self.rpc_server.Send(self.url_path, payload=None)
-    self.authenticated = True
-
-  def AuthFunction(self,
-                   raw_input_fn=raw_input,
-                   password_input_fn=getpass.getpass):
-    """Prompts the user for a username and password.
-
-    Caches the results the first time it is called and returns the
-    same result every subsequent time.
+class ExportResult(object):
+  """Holds the decoded content for the result of an export requests."""
+
+  def __init__(self, continued, direction, keys, entities):
+    self.continued = continued
+    self.direction = direction
+    self.keys = keys
+    self.entities = entities
+    self.count = len(keys)
+    assert self.count == len(entities)
+    assert direction in (KeyRange.ASC, KeyRange.DESC)
+    if self.count > 0:
+      if direction == KeyRange.ASC:
+        self.key_start = keys[0]
+        self.key_end = keys[-1]
+      else:
+        self.key_start = keys[-1]
+        self.key_end = keys[0]
+
+  def __str__(self):
+    return 'continued = %s\n%s' % (
+        str(self.continued), '\n'.join(self.entities))
+
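As a small worked example of the endpoint bookkeeping above (key values hypothetical): a descending scan returns its largest key first, so ExportResult swaps the endpoints to keep key_start <= key_end.

    keys_desc = [30, 20, 10]               # results of a DESC scan
    key_start, key_end = keys_desc[-1], keys_desc[0]
    assert (key_start, key_end) == (10, 30)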
+
+class _WorkItem(object):
+  """Holds a description of a unit of upload or download work."""
+
+  def __init__(self, progress_queue, key_start, key_end, state_namer,
+               state=STATE_READ, progress_key=None):
+    """Initialize the _WorkItem instance.
 
     Args:
-      raw_input_fn: Used for dependency injection.
-      password_input_fn: Used for dependency injection.
-
-    Returns:
-      A pair of the username and password.
+      progress_queue: A queue used for tracking progress information.
+      key_start: The starting key, inclusive.
+      key_end: The ending key, inclusive.
+      state_namer: Function to describe work item states.
+      state: The initial state of the work item.
+      progress_key: If this WorkItem represents state from a prior run,
+        then this will be the key within the progress database.
     """
-    if self.credentials is not None:
-      return self.credentials
-    print 'Please enter login credentials for %s (%s)' % (
-        self.host, self.app_id)
-    email = raw_input_fn('Email: ')
-    if email:
-      password_prompt = 'Password for %s: ' % email
-      password = password_input_fn(password_prompt)
-    else:
-      password = None
-    self.credentials = (email, password)
-    return self.credentials
-
-  def _GetHeaders(self):
-    """Constructs a dictionary of extra headers to send with a request."""
-    headers = {
-        'GAE-Uploader-Version': UPLOADER_VERSION,
-        'GAE-Uploader-Kind': self.kind
-        }
-    return headers
-
-  def EncodeContent(self, rows):
-    """Encodes row data to the wire format.
+    self.progress_queue = progress_queue
+    self.key_start = key_start
+    self.key_end = key_end
+    self.state_namer = state_namer
+    self.state = state
+    self.progress_key = progress_key
+    self.progress_event = threading.Event()
+
+  def _AssertInState(self, *states):
+    """Raises an Error if the state of this range is not in states."""
+    if self.state not in states:
+      raise BadStateError('%s:%s not in %s' %
+                          (str(self),
+                           self.state_namer(self.state),
+                           map(self.state_namer, states)))
+
+  def _AssertProgressKey(self):
+    """Raises an Error if the progress key is None."""
+    if self.progress_key is None:
+      raise BadStateError('%s: Progress key is missing' % str(self))
+
+  def MarkAsRead(self):
+    """Mark this _WorkItem as read, updating the progress database."""
+    self._AssertInState(STATE_READ)
+    self._StateTransition(STATE_READ, blocking=True)
+
+  def MarkAsTransferring(self):
+    """Mark this _WorkItem as transferring, updating the progress database."""
+    self._AssertInState(STATE_READ, STATE_NOT_GOT)
+    self._AssertProgressKey()
+    self._StateTransition(STATE_GETTING, blocking=True)
+
+  def MarkAsTransferred(self):
+    """Mark this _WorkItem as transferred, updating the progress database."""
+    raise NotImplementedError()
+
+  def MarkAsError(self):
+    """Mark this _WorkItem as failed, updating the progress database."""
+    self._AssertInState(STATE_GETTING)
+    self._AssertProgressKey()
+    self._StateTransition(STATE_NOT_GOT, blocking=True)
+
+  def _StateTransition(self, new_state, blocking=False):
+    """Transition the work item to a new state, storing progress information.
 
     Args:
-      rows: A list of pairs of a line number and a list of column values.
-
-    Returns:
-      A list of db.Model instances.
+      new_state: The state to transition to.
+      blocking: Whether to block for the progress thread to acknowledge the
+        transition.
     """
-    try:
-      loader = Loader.RegisteredLoaders()[self.kind]
-    except KeyError:
-      logging.error('No Loader defined for kind %s.' % self.kind)
-      raise ConfigurationError('No Loader defined for kind %s.' % self.kind)
-    entities = []
-    for line_number, values in rows:
-      key = loader.GenerateKey(line_number, values)
-      entity = loader.CreateEntity(values, key_name=key)
-      entities.extend(entity)
-
-    return entities
-
-  def PostEntities(self, item):
-    """Posts Entity records to a remote endpoint over HTTP.
-
-    Args:
-      item: A workitem containing the entities to post.
-
-    Returns:
-      A pair of the estimated size of the request in bytes and the response
-        from the server as a str.
-    """
-    entities = item.content
-    db.put(entities)
-
-
-class WorkItem(object):
+    assert not self.progress_event.isSet()
+
+    self.state = new_state
+
+    self.progress_queue.put(self)
+
+    if blocking:
+      self.progress_event.wait()
+
+      self.progress_event.clear()
+
+
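A minimal, self-contained sketch (not SDK code) of the blocking handshake performed by _StateTransition above: the worker enqueues the item and waits on the item's event, which the progress thread sets once the new state has been recorded.

    import Queue
    import threading

    class Item(object):
      def __init__(self):
        self.progress_event = threading.Event()

    progress_queue = Queue.Queue()

    def progress_thread():
      item = progress_queue.get()          # persist item.state here, then ack
      item.progress_event.set()

    t = threading.Thread(target=progress_thread)
    t.start()
    work = Item()
    progress_queue.put(work)               # hand the item to the progress thread
    work.progress_event.wait()             # block until the state is stored
    work.progress_event.clear()
    t.join()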
+
+class WorkItem(_WorkItem):
   """Holds a unit of uploading work.
 
   A WorkItem represents a number of entities that need to be uploaded to
@@ -854,78 +1025,572 @@
       progress_key: If this WorkItem represents state from a prior run,
         then this will be the key within the progress database.
     """
-    self.state = STATE_READ
-
-    self.progress_queue = progress_queue
+    _WorkItem.__init__(self, progress_queue, key_start, key_end,
+                       ImportStateName, state=STATE_READ,
+                       progress_key=progress_key)
 
     assert isinstance(key_start, (int, long))
     assert isinstance(key_end, (int, long))
     assert key_start <= key_end
 
-    self.key_start = key_start
-    self.key_end = key_end
-    self.progress_key = progress_key
-
-    self.progress_event = threading.Event()
-
     self.rows = rows
     self.content = None
     self.count = len(rows)
 
-  def MarkAsRead(self):
-    """Mark this WorkItem as read/consumed from the data source."""
-
-    assert self.state == STATE_READ
-
-    self._StateTransition(STATE_READ, blocking=True)
-
-    assert self.progress_key is not None
-
-  def MarkAsSending(self):
-    """Mark this WorkItem as in-process on being uploaded to the server."""
-
-    assert self.state == STATE_READ or self.state == STATE_NOT_SENT
-    assert self.progress_key is not None
-
-    self._StateTransition(STATE_SENDING, blocking=True)
-
-  def MarkAsSent(self):
+  def __str__(self):
+    return '[%s-%s]' % (self.key_start, self.key_end)
+
+  def MarkAsTransferred(self):
     """Mark this WorkItem as sucessfully-sent to the server."""
 
-    assert self.state == STATE_SENDING
-    assert self.progress_key is not None
+    self._AssertInState(STATE_SENDING)
+    self._AssertProgressKey()
 
     self._StateTransition(STATE_SENT, blocking=False)
 
-  def MarkAsError(self):
-    """Mark this WorkItem as required manual error recovery."""
-
-    assert self.state == STATE_SENDING
-    assert self.progress_key is not None
-
-    self._StateTransition(STATE_NOT_SENT, blocking=True)
-
-  def _StateTransition(self, new_state, blocking=False):
-    """Transition the work item to a new state, storing progress information.
+
+def GetImplementationClass(kind_or_class_key):
+  """Returns the implementation class for a given kind or class key.
+
+  Args:
+    kind_or_class_key: A kind string or a tuple of kind strings.
+
+  Returns:
+    A db.Model subclass for the given kind or class key.
+  """
+  if isinstance(kind_or_class_key, tuple):
+    try:
+      implementation_class = polymodel._class_map[kind_or_class_key]
+    except KeyError:
+      raise db.KindError('No implementation for class \'%s\'' %
+                         kind_or_class_key)
+  else:
+    implementation_class = db.class_for_kind(kind_or_class_key)
+  return implementation_class
+
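Hypothetical usage of the helper above, assuming the SDK is on sys.path and that a model class for the kind has already been imported (db.class_for_kind only knows about kinds whose classes have been defined):

    from google.appengine.ext import db
    from google.appengine.tools import bulkloader

    class Greeting(db.Model):              # hypothetical kind
      content = db.StringProperty()

    assert bulkloader.GetImplementationClass('Greeting') is Greeting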
+class EmptyQuery(db.Query):
+  """A query that matches nothing, used for degenerate key ranges."""
+
+  def get(self):
+    return None
+
+  def fetch(self, limit=1000, offset=0):
+    return []
+
+  def count(self, limit=1000):
+    return 0
+
+
+def KeyLEQ(key1, key2):
+  """Compare two keys for less-than-or-equal-to.
+
+  All keys with numeric ids come before all keys with names.
+
+  Args:
+    key1: An int or db.Key instance.
+    key2: An int or db.Key instance.
+
+  Returns:
+    True if key1 <= key2
+  """
+  if isinstance(key1, int) and isinstance(key2, int):
+    return key1 <= key2
+  if key1 is None or key2 is None:
+    return True
+  if key1.id() and not key2.id():
+    return True
+  return key1.id_or_name() <= key2.id_or_name()
+
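For example, assuming the module imports cleanly: numeric ids compare numerically, and an open endpoint (None) satisfies the comparison by definition.

    from google.appengine.tools import bulkloader

    assert bulkloader.KeyLEQ(3, 7)         # two numeric ids
    assert bulkloader.KeyLEQ(None, 7)      # an open-ended range endpoint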
+
+class KeyRange(_WorkItem):
+  """Represents an item of download work.
+
+  A KeyRange object represents a key range (key_start, key_end) and a
+  scan direction (KeyRange.DESC or KeyRange.ASC).  The KeyRange object
+  has an associated state: STATE_READ, STATE_GETTING, STATE_GOT, and
+  STATE_ERROR.
+
+  - STATE_READ indicates the range is ready to be downloaded by a worker
+    thread.
+  - STATE_GETTING indicates the range is currently being downloaded.
+  - STATE_GOT indicates that the range was successfully downloaded.
+  - STATE_ERROR indicates that an error occurred during the last download
+    attempt.
+
+  KeyRanges not in the STATE_GOT state are stored in the progress database.
+  When a piece of KeyRange work is downloaded, the download may cover only
+  a portion of the range.  In this case, the old KeyRange is removed from
+  the progress database and ranges covering the undownloaded range are
+  generated and stored as STATE_READ in the export progress database.
+  """
+
+  DESC = 0
+  ASC = 1
+
+  MAX_KEY_LEN = 500
+
+  def __init__(self,
+               progress_queue,
+               kind,
+               direction,
+               key_start=None,
+               key_end=None,
+               include_start=True,
+               include_end=True,
+               progress_key=None,
+               state=STATE_READ):
+    """Initialize a KeyRange object.
+
+    Args:
+      progress_queue: A queue used for tracking progress information.
+      kind: The kind of entities for this range.
+      direction: The direction of the query for this range.
+      key_start: The starting key for this range.
+      key_end: The ending key for this range.
+      include_start: Whether the start key should be included in the range.
+      include_end: Whether the end key should be included in the range.
+      progress_key: The key for this range within the progress database.
+      state: The initial state of this range.
+
+    Raises:
+      KeyRangeError: if key_start is None.
+    """
+    assert direction in (KeyRange.ASC, KeyRange.DESC)
+    _WorkItem.__init__(self, progress_queue, key_start, key_end,
+                       ExportStateName, state=state, progress_key=progress_key)
+    self.kind = kind
+    self.direction = direction
+    self.export_result = None
+    self.count = 0
+    self.include_start = include_start
+    self.include_end = include_end
+    self.SPLIT_KEY = db.Key.from_path(self.kind, unichr(0))
+
+  def __str__(self):
+    return '[%s-%s]' % (PrettyKey(self.key_start), PrettyKey(self.key_end))
+
+  def __repr__(self):
+    return self.__str__()
+
+  def MarkAsTransferred(self):
+    """Mark this KeyRange as transferred, updating the progress database."""
+    pass
+
+  def Process(self, export_result, num_threads, batch_size, work_queue):
+    """Mark this KeyRange as success, updating the progress database.
+
+    Process will split this KeyRange based on the content of export_result and
+    adds the unfinished ranges to the work queue.
+
+    Args:
+      export_result: An ExportResult instance.
+      num_threads: The number of threads for parallel transfers.
+      batch_size: The number of entities to transfer per request.
+      work_queue: The work queue to add unfinished ranges to.
+    """
+    self._AssertInState(STATE_GETTING)
+    self._AssertProgressKey()
+
+    self.export_result = export_result
+    self.count = len(export_result.keys)
+    if export_result.continued:
+      self._FinishedRange()._StateTransition(STATE_GOT, blocking=True)
+      self._AddUnfinishedRanges(num_threads, batch_size, work_queue)
+    else:
+      self._StateTransition(STATE_GOT, blocking=True)
+
+  def _FinishedRange(self):
+    """Returns the range completed by the export_result.
+
+    Returns:
+      A KeyRange representing a completed range.
+    """
+    assert self.export_result is not None
+
+    if self.direction == KeyRange.ASC:
+      key_start = self.key_start
+      if self.export_result.continued:
+        key_end = self.export_result.key_end
+      else:
+        key_end = self.key_end
+    else:
+      key_end = self.key_end
+      if self.export_result.continued:
+        key_start = self.export_result.key_start
+      else:
+        key_start = self.key_start
+
+    result = KeyRange(self.progress_queue,
+                      self.kind,
+                      key_start=key_start,
+                      key_end=key_end,
+                      direction=self.direction)
+
+    result.progress_key = self.progress_key
+    result.export_result = self.export_result
+    result.state = self.state
+    result.count = self.count
+    return result
+
+  def FilterQuery(self, query):
+    """Add query filter to restrict to this key range.
 
     Args:
-      new_state: The state to transition to.
-      blocking: Whether to block for the progress thread to acknowledge the
-        transition.
+      query: A db.Query instance.
+
+    Returns:
+      The filtered query, or an EmptyQuery instance if this range is empty.
+    """
+    if self.key_start == self.key_end and not (
+        self.include_start or self.include_end):
+      return EmptyQuery()
+    if self.include_start:
+      start_comparator = '>='
+    else:
+      start_comparator = '>'
+    if self.include_end:
+      end_comparator = '<='
+    else:
+      end_comparator = '<'
+    if self.key_start and self.key_end:
+      query.filter('__key__ %s' % start_comparator, self.key_start)
+      query.filter('__key__ %s' % end_comparator, self.key_end)
+    elif self.key_start:
+      query.filter('__key__ %s' % start_comparator, self.key_start)
+    elif self.key_end:
+      query.filter('__key__ %s' % end_comparator, self.key_end)
+
+    return query
+
+  def MakeParallelQuery(self):
+    """Construct a query for this key range, for parallel downloading.
+
+    Returns:
+      A db.Query instance.
+
+    Raises:
+      KeyRangeError: if self.direction is not one of
+        KeyRange.ASC, KeyRange.DESC
+    """
+    if self.direction == KeyRange.ASC:
+      direction = ''
+    elif self.direction == KeyRange.DESC:
+      direction = '-'
+    else:
+      raise KeyRangeError('KeyRange direction unexpected: %s' % self.direction)
+    query = db.Query(GetImplementationClass(self.kind))
+    query.order('%s__key__' % direction)
+
+    return self.FilterQuery(query)
+
+  def MakeSerialQuery(self):
+    """Construct a query for this key range without descending __key__ scan.
+
+    Returns:
+      A db.Query instance.
+    """
+    query = db.Query(GetImplementationClass(self.kind))
+    query.order('__key__')
+
+    return self.FilterQuery(query)
+
+  def _BisectStringRange(self, start, end):
+    """Returns (start, midpoint, end) where midpoint bisects the key names."""
+    if start == end:
+      return (start, start, end)
+    start += '\0'
+    end += '\0'
+    midpoint = []
+    expected_max = 127
+    for i in xrange(min(len(start), len(end))):
+      if start[i] == end[i]:
+        midpoint.append(start[i])
+      else:
+        ord_sum = ord(start[i]) + ord(end[i])
+        midpoint.append(unichr(ord_sum / 2))
+        if ord_sum % 2:
+          if len(start) > i + 1:
+            ord_start = ord(start[i+1])
+          else:
+            ord_start = 0
+          if ord_start < expected_max:
+            ord_split = (expected_max + ord_start) / 2
+          else:
+            ord_split = (0xFFFF + ord_start) / 2
+          midpoint.append(unichr(ord_split))
+        break
+    return (start[:-1], ''.join(midpoint), end[:-1])
+
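A simplified sketch (not the SDK code) of the string-bisection idea used by _BisectStringRange above: walk the two key names until they differ, then pick the character halfway between the differing characters. For single-character names this reduces to the midpoint character.

    def midpoint_char(lo, hi):
      # The simplest case handled by _BisectStringRange.
      return unichr((ord(lo) + ord(hi)) / 2)

    assert midpoint_char(u'a', u'c') == u'b'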
+  def SplitRange(self, key_start, include_start, key_end, include_end,
+                 export_result, num_threads, batch_size, work_queue):
+    """Split the key range [key_start, key_end] into a list of ranges."""
+    if export_result.direction == KeyRange.ASC:
+      key_start = export_result.key_end
+      include_start = False
+    else:
+      key_end = export_result.key_start
+      include_end = False
+    key_pairs = []
+    if not key_start:
+      key_pairs.append((key_start, include_start, key_end, include_end,
+                        KeyRange.ASC))
+    elif not key_end:
+      key_pairs.append((key_start, include_start, key_end, include_end,
+                        KeyRange.DESC))
+    elif work_queue.qsize() > 2 * num_threads:
+      key_pairs.append((key_start, include_start, key_end, include_end,
+                        KeyRange.ASC))
+    elif key_start.id() and key_end.id():
+      if key_end.id() - key_start.id() > batch_size:
+        key_half = db.Key.from_path(self.kind,
+                                    (key_start.id() + key_end.id()) / 2)
+        key_pairs.append((key_start, include_start,
+                          key_half, True,
+                          KeyRange.DESC))
+        key_pairs.append((key_half, False,
+                          key_end, include_end,
+                          KeyRange.ASC))
+      else:
+        key_pairs.append((key_start, include_start, key_end, include_end,
+                          KeyRange.ASC))
+    elif key_start.name() and key_end.name():
+      (start, middle, end) = self._BisectStringRange(key_start.name(),
+                                                     key_end.name())
+      key_pairs.append((key_start, include_start,
+                        db.Key.from_path(self.kind, middle), True,
+                        KeyRange.DESC))
+      key_pairs.append((db.Key.from_path(self.kind, middle), False,
+                        key_end, include_end,
+                        KeyRange.ASC))
+    else:
+      assert key_start.id() and key_end.name()
+      key_pairs.append((key_start, include_start,
+                        self.SPLIT_KEY, False,
+                        KeyRange.DESC))
+      key_pairs.append((self.SPLIT_KEY, True,
+                        key_end, include_end,
+                        KeyRange.ASC))
+
+    ranges = [KeyRange(self.progress_queue,
+                       self.kind,
+                       key_start=start,
+                       include_start=include_start,
+                       key_end=end,
+                       include_end=include_end,
+                       direction=direction)
+              for (start, include_start, end, include_end, direction)
+              in key_pairs]
+
+    for key_range in ranges:
+      key_range.MarkAsRead()
+      work_queue.put(key_range, block=True)
+
+  def _AddUnfinishedRanges(self, num_threads, batch_size, work_queue):
+    """Adds incomplete KeyRanges to the work_queue.
+
+    Args:
+      num_threads: The number of threads for parallel transfers.
+      batch_size: The number of entities to transfer per request.
+      work_queue: The work queue to add unfinished ranges to.
+
+    Raises:
+      KeyRangeError: if this key range has already been completely transferred.
     """
-    logging.debug('[%s-%s] %s' %
-                  (self.key_start, self.key_end, StateMessage(self.state)))
-    assert not self.progress_event.isSet()
-
-    self.state = new_state
-
-    self.progress_queue.put(self)
-
-    if blocking:
-      self.progress_event.wait()
-
-      self.progress_event.clear()
-
+    assert self.export_result is not None
+    if self.export_result.continued:
+      self.SplitRange(self.key_start, self.include_start, self.key_end,
+                      self.include_end, self.export_result,
+                      num_threads, batch_size, work_queue)
+    else:
+      raise KeyRangeError('No unfinished part of key range.')
+
+
+class RequestManager(object):
+  """A class which wraps a connection to the server."""
+
+  def __init__(self,
+               app_id,
+               host_port,
+               url_path,
+               kind,
+               throttle,
+               batch_size,
+               secure,
+               email,
+               passin):
+    """Initialize a RequestManager object.
+
+    Args:
+      app_id: String containing the application id for requests.
+      host_port: String containing the "host:port" pair; the port is optional.
+      url_path: partial URL (path) to post entity data to.
+      kind: Kind of the Entity records being posted.
+      throttle: A Throttle instance.
+      batch_size: The number of entities to transfer per request.
+      secure: Use SSL when communicating with the server.
+      email: If not None, the username to log in with.
+      passin: If True, the password will be read from standard input.
+    """
+    self.app_id = app_id
+    self.host_port = host_port
+    self.host = host_port.split(':')[0]
+    if url_path and url_path[0] != '/':
+      url_path = '/' + url_path
+    self.url_path = url_path
+    self.kind = kind
+    self.throttle = throttle
+    self.batch_size = batch_size
+    self.secure = secure
+    self.authenticated = False
+    self.auth_called = False
+    self.parallel_download = True
+    self.email = email
+    self.passin = passin
+    throttled_rpc_server_factory = ThrottledHttpRpcServerFactory(
+        self.throttle, self)
+    logger.debug('Configuring remote_api. url_path = %s, '
+                 'servername = %s' % (url_path, host_port))
+    remote_api_stub.ConfigureRemoteDatastore(
+        app_id,
+        url_path,
+        self.AuthFunction,
+        servername=host_port,
+        rpc_server_factory=throttled_rpc_server_factory,
+        secure=self.secure)
+    logger.debug('Bulkloader using app_id: %s', os.environ['APPLICATION_ID'])
+
+  def Authenticate(self):
+    """Invoke authentication if necessary."""
+    logger.info('Connecting to %s', self.url_path)
+    self.rpc_server.Send(self.url_path, payload=None)
+    self.authenticated = True
+
+  def AuthFunction(self,
+                   raw_input_fn=raw_input,
+                   password_input_fn=getpass.getpass):
+    """Prompts the user for a username and password.
+
+    Caches the results the first time it is called and returns the
+    same result every subsequent time.
+
+    Args:
+      raw_input_fn: Used for dependency injection.
+      password_input_fn: Used for dependency injection.
+
+    Returns:
+      A pair of the username and password.
+    """
+    if self.email:
+      email = self.email
+    else:
+      print 'Please enter login credentials for %s' % (
+          self.host)
+      email = raw_input_fn('Email: ')
+
+    if email:
+      password_prompt = 'Password for %s: ' % email
+      if self.passin:
+        password = raw_input_fn(password_prompt)
+      else:
+        password = password_input_fn(password_prompt)
+    else:
+      password = None
+
+    self.auth_called = True
+    return (email, password)
+
+  def EncodeContent(self, rows, loader=None):
+    """Encodes row data to the wire format.
+
+    Args:
+      rows: A list of pairs of a line number and a list of column values.
+      loader: Used for dependency injection.
+
+    Returns:
+      A list of db.Model instances.
+
+    Raises:
+      ConfigurationError: if no loader is defined for self.kind
+    """
+    if not loader:
+      try:
+        loader = Loader.RegisteredLoader(self.kind)
+      except KeyError:
+        logger.error('No Loader defined for kind %s.' % self.kind)
+        raise ConfigurationError('No Loader defined for kind %s.' % self.kind)
+    entities = []
+    for line_number, values in rows:
+      key = loader.generate_key(line_number, values)
+      if isinstance(key, db.Key):
+        parent = key.parent()
+        key = key.name()
+      else:
+        parent = None
+      entity = loader.create_entity(values, key_name=key, parent=parent)
+      if isinstance(entity, list):
+        entities.extend(entity)
+      elif entity:
+        entities.append(entity)
+
+    return entities
+
+  def PostEntities(self, item):
+    """Posts Entity records to a remote endpoint over HTTP.
+
+    Args:
+      item: A workitem containing the entities to post.
+
+    Returns:
+      A pair of the estimated size of the request in bytes and the response
+        from the server as a str.
+    """
+    entities = item.content
+    db.put(entities)
+
+  def GetEntities(self, key_range):
+    """Gets Entity records from a remote endpoint over HTTP.
+
+    Args:
+      key_range: Range of keys to get.
+
+    Returns:
+      An ExportResult instance.
+
+    Raises:
+      ConfigurationError: if no Exporter is defined for self.kind
+    """
+    try:
+      Exporter.RegisteredExporter(self.kind)
+    except KeyError:
+      raise ConfigurationError('No Exporter defined for kind %s.' % self.kind)
+
+    keys = []
+    entities = []
+
+    if self.parallel_download:
+      query = key_range.MakeParallelQuery()
+      try:
+        results = query.fetch(self.batch_size)
+      except datastore_errors.NeedIndexError:
+        logger.info('%s: No descending index on __key__, '
+                    'performing serial download', self.kind)
+        self.parallel_download = False
+
+    if not self.parallel_download:
+      key_range.direction = KeyRange.ASC
+      query = key_range.MakeSerialQuery()
+      results = query.fetch(self.batch_size)
+
+    size = len(results)
+
+    for model in results:
+      key = model.key()
+      entities.append(cPickle.dumps(model))
+      keys.append(key)
+
+    continued = (size == self.batch_size)
+    key_range.count = size
+
+    return ExportResult(continued, key_range.direction, keys, entities)
 
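A short illustration (values hypothetical) of the continued flag computed at the end of GetEntities above: a full batch means more keys may remain beyond the fetched results, which is what later causes KeyRange.Process to split and re-queue the remainder of the range.

    batch_size = 100
    results = list(range(batch_size))      # the query returned a full batch
    continued = (len(results) == batch_size)
    assert continued                       # the rest of the range is re-queued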
 
 def InterruptibleSleep(sleep_time):
@@ -961,7 +1626,19 @@
   failed upload, the number of active threads is reduced by one.
   """
 
-  def __init__(self, enabled, sleep=InterruptibleSleep):
+  def __init__(self, enabled,
+               threshhold1=MAXIMUM_INCREASE_DURATION,
+               threshhold2=MAXIMUM_HOLD_DURATION,
+               sleep=InterruptibleSleep):
+    """Constructor for ThreadGate instances.
+
+    Args:
+      enabled: Whether the thread gate is enabled.
+      threshhold1: Maximum duration (in seconds) for a transfer to increase
+        the number of active threads.
+      threshhold2: Maximum duration (in seconds) for a transfer to not decrease
+        the number of active threads.
+    """
     self.enabled = enabled
     self.enabled_count = 1
     self.lock = threading.Lock()
@@ -969,6 +1646,8 @@
     self._threads = []
     self.backoff_time = 0
     self.sleep = sleep
+    self.threshhold1 = threshhold1
+    self.threshhold2 = threshhold2
 
   def Register(self, thread):
     """Register a thread with the thread gate."""
@@ -990,7 +1669,7 @@
 
   def EnableAllThreads(self):
     """Enable all worker threads."""
-    for unused_idx in range(len(self._threads) - self.enabled_count):
+    for unused_idx in xrange(len(self._threads) - self.enabled_count):
       self.EnableThread()
 
   def StartWork(self):
@@ -1004,8 +1683,8 @@
       self.thread_semaphore.acquire()
       if self.backoff_time > 0.0:
         if not threading.currentThread().exit_flag:
-          logging.info('Backing off: %.1f seconds',
-                       self.backoff_time)
+          logger.info('Backing off: %.1f seconds',
+                      self.backoff_time)
         self.sleep(self.backoff_time)
 
   def FinishWork(self):
@@ -1013,15 +1692,22 @@
     if self.enabled:
       self.thread_semaphore.release()
 
-  def IncreaseWorkers(self):
+  def TransferSuccess(self, duration):
     """Informs the throttler that an item was successfully sent.
 
-    If thread throttling is enabled, this method will cause an
-    additional thread to run in the critical section.
+    If thread throttling is enabled and the duration is low enough, this
+    method will cause an additional thread to run in the critical section.
+
+    Args:
+      duration: The duration of the transfer in seconds.
     """
-    if self.enabled:
+    if duration > self.threshhold2:
+      self.DecreaseWorkers()
+    elif duration > self.threshhold1:
+      return
+    elif self.enabled:
       if self.backoff_time > 0.0:
-        logging.info('Resetting backoff to 0.0')
+        logger.info('Resetting backoff to 0.0')
         self.backoff_time = 0.0
       do_enable = False
       self.lock.acquire()
@@ -1032,6 +1718,8 @@
       finally:
         self.lock.release()
       if do_enable:
+        logger.debug('Increasing active thread count to %d',
+                     self.enabled_count)
         self.thread_semaphore.release()
 
   def DecreaseWorkers(self):
@@ -1058,6 +1746,8 @@
       finally:
         self.lock.release()
       if do_disable:
+        logger.debug('Decreasing the number of active threads to %d',
+                     self.enabled_count)
         self.thread_semaphore.acquire()
 
 
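A condensed restatement (not SDK code) of the TransferSuccess policy above; the threshold values are purely illustrative, the real defaults come from MAXIMUM_INCREASE_DURATION and MAXIMUM_HOLD_DURATION.

    def thread_delta(duration, threshhold1=8.0, threshhold2=30.0):
      # Fast transfers add a worker, very slow ones remove one.
      if duration > threshhold2:
        return -1                          # too slow: decrease workers
      elif duration > threshhold1:
        return 0                           # acceptable: hold steady
      return 1                             # fast: possibly enable another worker

    assert thread_delta(0.5) == 1
    assert thread_delta(60.0) == -1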
@@ -1207,10 +1897,10 @@
       if sleep_time < MINIMUM_THROTTLE_SLEEP_DURATION:
         break
 
-      logging.debug('[%s] Throttling on %s. Sleeping for %.1f ms '
-                    '(duration=%.1f ms, total=%d)',
-                    thread.getName(), throttle_name,
-                    sleep_time * 1000, duration * 1000, total)
+      logger.debug('[%s] Throttling on %s. Sleeping for %.1f ms '
+                   '(duration=%.1f ms, total=%d)',
+                   thread.getName(), throttle_name,
+                   sleep_time * 1000, duration * 1000, total)
       self.thread_sleep(sleep_time)
       if thread.exit_flag:
         break
@@ -1299,15 +1989,15 @@
 
   def run(self):
     """Perform the work of the thread."""
-    logging.info('[%s] %s: started', self.getName(), self.__class__.__name__)
+    logger.info('[%s] %s: started', self.getName(), self.__class__.__name__)
 
     try:
       self.PerformWork()
     except:
       self.error = sys.exc_info()[1]
-      logging.exception('[%s] %s:', self.getName(), self.__class__.__name__)
-
-    logging.info('[%s] %s: exiting', self.getName(), self.__class__.__name__)
+      logger.exception('[%s] %s:', self.getName(), self.__class__.__name__)
+
+    logger.info('[%s] %s: exiting', self.getName(), self.__class__.__name__)
 
   def PerformWork(self):
     """Perform the thread-specific work."""
@@ -1316,7 +2006,7 @@
   def CheckError(self):
     """If an error is present, then log it."""
     if self.error:
-      logging.error('Error in %s: %s', self.GetFriendlyName(), self.error)
+      logger.error('Error in %s: %s', self.GetFriendlyName(), self.error)
 
   def GetFriendlyName(self):
     """Returns a human-friendly description of the thread."""
@@ -1325,7 +2015,184 @@
     return 'unknown thread'
 
 
-class BulkLoaderThread(_ThreadBase):
+non_fatal_error_codes = set([errno.EAGAIN,
+                             errno.ENETUNREACH,
+                             errno.ENETRESET,
+                             errno.ECONNRESET,
+                             errno.ETIMEDOUT,
+                             errno.EHOSTUNREACH])
+
+
+def IsURLErrorFatal(error):
+  """Returns False if the given URLError may be from a transient failure.
+
+  Args:
+    error: A urllib2.URLError instance.
+  """
+  assert isinstance(error, urllib2.URLError)
+  if not hasattr(error, 'reason'):
+    return True
+  if not isinstance(error.reason[0], int):
+    return True
+  return error.reason[0] not in non_fatal_error_codes
+
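For illustration, assuming the module imports cleanly: a connection reset carries an errno from the non-fatal set above and is retried, while a URLError whose reason is not an (errno, message) pair is treated as fatal.

    import errno
    import socket
    import urllib2

    from google.appengine.tools import bulkloader

    reset = urllib2.URLError(socket.error(errno.ECONNRESET, 'connection reset'))
    assert not bulkloader.IsURLErrorFatal(reset)
    assert bulkloader.IsURLErrorFatal(urllib2.URLError('unknown url type'))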
+
+def PrettyKey(key):
+  """Returns a nice string representation of the given key."""
+  if key is None:
+    return None
+  elif isinstance(key, db.Key):
+    return repr(key.id_or_name())
+  return str(key)
+
+
+class _BulkWorkerThread(_ThreadBase):
+  """A base class for worker threads.
+
+  This thread reads work items from the work_queue and transfers the
+  associated entities to or from the server application. Progress
+  information is pushed into the progress_queue as the work is performed.
+
+  If a _BulkWorkerThread encounters a transient error, the transfer is
+  retried; if a fatal error is encountered, the _BulkWorkerThread exits.
+
+  Subclasses must provide implementations for PreProcessItem, TransferItem,
+  and ProcessResponse.
+  """
+
+  def __init__(self,
+               work_queue,
+               throttle,
+               thread_gate,
+               request_manager,
+               num_threads,
+               batch_size,
+               state_message,
+               get_time):
+    """Initialize the BulkLoaderThread instance.
+
+    Args:
+      work_queue: A queue containing WorkItems for processing.
+      throttle: A Throttles to control upload bandwidth.
+      thread_gate: A ThreadGate to control number of simultaneous uploads.
+      request_manager: A RequestManager instance.
+      num_threads: The number of threads for parallel transfers.
+      batch_size: The number of entities to transfer per request.
+      state_message: Used for dependency injection.
+      get_time: Used for dependency injection.
+    """
+    _ThreadBase.__init__(self)
+
+    self.work_queue = work_queue
+    self.throttle = throttle
+    self.thread_gate = thread_gate
+    self.request_manager = request_manager
+    self.num_threads = num_threads
+    self.batch_size = batch_size
+    self.state_message = state_message
+    self.get_time = get_time
+
+  def PreProcessItem(self, item):
+    """Performs pre transfer processing on a work item."""
+    raise NotImplementedError()
+
+  def TransferItem(self, item):
+    """Transfers the entities associated with an item.
+
+    Args:
+      item: An item of upload (WorkItem) or download (KeyRange) work.
+
+    Returns:
+      A tuple of (estimated transfer size, response)
+    """
+    raise NotImplementedError()
+
+  def ProcessResponse(self, item, result):
+    """Processes the response from the server application."""
+    raise NotImplementedError()
+
+  def PerformWork(self):
+    """Perform the work of a _BulkWorkerThread."""
+    while not self.exit_flag:
+      transferred = False
+      self.thread_gate.StartWork()
+      try:
+        try:
+          item = self.work_queue.get(block=True, timeout=1.0)
+        except Queue.Empty:
+          continue
+        if item == _THREAD_SHOULD_EXIT:
+          break
+
+        logger.debug('[%s] Got work item %s', self.getName(), item)
+
+        try:
+
+          item.MarkAsTransferring()
+          self.PreProcessItem(item)
+          response = None
+          try:
+            try:
+              t = self.get_time()
+              response = self.TransferItem(item)
+              status = 200
+              transferred = True
+              transfer_time = self.get_time() - t
+              logger.debug('[%s] %s Transferred %d entities', self.getName(),
+                           item, item.count)
+              self.throttle.AddTransfer(RECORDS, item.count)
+            except (db.InternalError, db.NotSavedError, db.Timeout,
+                    apiproxy_errors.OverQuotaError,
+                    apiproxy_errors.DeadlineExceededError), e:
+              logger.exception('Caught non-fatal datastore error: %s', e)
+            except urllib2.HTTPError, e:
+              status = e.code
+              if status == 403 or (status >= 500 and status < 600):
+                logger.exception('Caught non-fatal HTTP error: %d %s',
+                                 status, e.msg)
+              else:
+                raise e
+            except urllib2.URLError, e:
+              if IsURLErrorFatal(e):
+                raise e
+              else:
+                logger.exception('Caught non-fatal URL error: %s', e.reason)
+
+            self.ProcessResponse(item, response)
+
+          except:
+            self.error = sys.exc_info()[1]
+            logger.exception('[%s] %s: caught exception %s', self.getName(),
+                             self.__class__.__name__, str(sys.exc_info()))
+            raise
+
+        finally:
+          if transferred:
+            item.MarkAsTransferred()
+            self.thread_gate.TransferSuccess(transfer_time)
+            self.work_queue.task_done()
+          else:
+            item.MarkAsError()
+            try:
+              self.work_queue.reput(item, block=False)
+            except Queue.Full:
+              logger.error('[%s] Failed to reput work item.', self.getName())
+              raise Error('Failed to reput work item')
+            self.thread_gate.DecreaseWorkers()
+          logger.info('%s %s',
+                      item,
+                      self.state_message(item.state))
+
+      finally:
+        self.thread_gate.FinishWork()
+
+
+  def GetFriendlyName(self):
+    """Returns a human-friendly name for this thread."""
+    return 'worker [%s]' % self.getName()
+
+
+class BulkLoaderThread(_BulkWorkerThread):
   """A thread which transmits entities to the server application.
 
   This thread will read WorkItem instances from the work_queue and upload
@@ -1340,7 +2207,10 @@
                work_queue,
                throttle,
                thread_gate,
-               request_manager):
+               request_manager,
+               num_threads,
+               batch_size,
+               get_time=time.time):
     """Initialize the BulkLoaderThread instance.
 
     Args:
@@ -1348,82 +2218,102 @@
       throttle: A Throttle to control upload bandwidth.
       thread_gate: A ThreadGate to control number of simultaneous uploads.
       request_manager: A RequestManager instance.
+      num_threads: The number of threads for parallel transfers.
+      batch_size: The number of entities to transfer per request.
+      get_time: Used for dependency injection.
     """
-    _ThreadBase.__init__(self)
-
-    self.work_queue = work_queue
-    self.throttle = throttle
-    self.thread_gate = thread_gate
-
-    self.request_manager = request_manager
-
-  def PerformWork(self):
-    """Perform the work of a BulkLoaderThread."""
-    while not self.exit_flag:
-      success = False
-      self.thread_gate.StartWork()
-      try:
-        try:
-          item = self.work_queue.get(block=True, timeout=1.0)
-        except Queue.Empty:
-          continue
-        if item == _THREAD_SHOULD_EXIT:
-          break
-
-        logging.debug('[%s] Got work item [%d-%d]',
-                      self.getName(), item.key_start, item.key_end)
-
-        try:
-
-          item.MarkAsSending()
-          try:
-            if item.content is None:
-              item.content = self.request_manager.EncodeContent(item.rows)
-            try:
-              self.request_manager.PostEntities(item)
-              success = True
-              logging.debug(
-                  '[%d-%d] Sent %d entities',
-                  item.key_start, item.key_end, item.count)
-              self.throttle.AddTransfer(RECORDS, item.count)
-            except (db.InternalError, db.NotSavedError, db.Timeout), e:
-              logging.debug('Caught non-fatal error: %s', e)
-            except urllib2.HTTPError, e:
-              if e.code == 403 or (e.code >= 500 and e.code < 600):
-                logging.debug('Caught HTTP error %d', e.code)
-                logging.debug('%s', e.read())
-              else:
-                raise e
-
-          except:
-            self.error = sys.exc_info()[1]
-            logging.exception('[%s] %s: caught exception %s', self.getName(),
-                              self.__class__.__name__, str(sys.exc_info()))
-            raise
-
-        finally:
-          if success:
-            item.MarkAsSent()
-            self.thread_gate.IncreaseWorkers()
-            self.work_queue.task_done()
-          else:
-            item.MarkAsError()
-            self.thread_gate.DecreaseWorkers()
-            try:
-              self.work_queue.reput(item, block=False)
-            except Queue.Full:
-              logging.error('[%s] Failed to reput work item.', self.getName())
-              raise Error('Failed to reput work item')
-          logging.info('[%d-%d] %s',
-                       item.key_start, item.key_end, StateMessage(item.state))
-
-      finally:
-        self.thread_gate.FinishWork()
-
-
-  def GetFriendlyName(self):
-    """Returns a human-friendly name for this thread."""
-    return 'worker [%s]' % self.getName()
+    _BulkWorkerThread.__init__(self,
+                               work_queue,
+                               throttle,
+                               thread_gate,
+                               request_manager,
+                               num_threads,
+                               batch_size,
+                               ImportStateMessage,
+                               get_time)
+
+  def PreProcessItem(self, item):
+    """Performs pre transfer processing on a work item."""
+    if item and not item.content:
+      item.content = self.request_manager.EncodeContent(item.rows)
+
+  def TransferItem(self, item):
+    """Transfers the entities associated with an item.
+
+    Args:
+      item: An item of upload (WorkItem) work.
+
+    Returns:
+      A tuple of (estimated transfer size, response)
+    """
+    return self.request_manager.PostEntities(item)
+
+  def ProcessResponse(self, item, response):
+    """Processes the response from the server application."""
+    pass
+
+
+class BulkExporterThread(_BulkWorkerThread):
+  """A thread which recieved entities to the server application.
+
+  This thread will read KeyRange instances from the work_queue and export
+  the entities from the server application. Progress information will be
+  pushed into the progress_queue as the work is being performed.
+
+  If a BulkExporterThread encounters an error when trying to fetch data,
+  the thread will exit and cause the application to terminate.
+  """
+
+  def __init__(self,
+               work_queue,
+               throttle,
+               thread_gate,
+               request_manager,
+               num_threads,
+               batch_size,
+               get_time=time.time):
+
+    """Initialize the BulkExporterThread instance.
+
+    Args:
+      work_queue: A queue containing KeyRanges for processing.
+      throttle: A Throttle to control download bandwidth.
+      thread_gate: A ThreadGate to control the number of simultaneous
+        downloads.
+      request_manager: A RequestManager instance.
+      num_threads: The number of threads for parallel transfers.
+      batch_size: The number of entities to transfer per request.
+      get_time: Used for dependency injection.
+    """
+    _BulkWorkerThread.__init__(self,
+                               work_queue,
+                               throttle,
+                               thread_gate,
+                               request_manager,
+                               num_threads,
+                               batch_size,
+                               ExportStateMessage,
+                               get_time)
+
+  def PreProcessItem(self, unused_item):
+    """Performs pre transfer processing on a work item."""
+    pass
+
+  def TransferItem(self, item):
+    """Transfers the entities associated with an item.
+
+    Args:
+      item: An item of download (KeyRange) work.
+
+    Returns:
+      A tuple of (estimated transfer size, response)
+    """
+    return self.request_manager.GetEntities(item)
+
+  def ProcessResponse(self, item, export_result):
+    """Processes the response from the server application."""
+    if export_result:
+      item.Process(export_result, self.num_threads, self.batch_size,
+                   self.work_queue)
 
 
 class DataSourceThread(_ThreadBase):
@@ -1471,7 +2361,7 @@
     content_gen = self.workitem_generator_factory(self.progress_queue,
                                                   progress_gen)
 
-    self.sent_count = 0
+    self.xfer_count = 0
     self.read_count = 0
     self.read_all = False
 
@@ -1492,7 +2382,7 @@
     if not self.exit_flag:
       self.read_all = True
     self.read_count = content_gen.row_count
-    self.sent_count = content_gen.sent_count
+    self.xfer_count = content_gen.xfer_count
 
 
 
@@ -1501,62 +2391,95 @@
   return threading.currentThread().getName() == thread.getName()
 
 
-class ProgressDatabase(object):
-  """Persistently record all progress information during an upload.
-
-  This class wraps a very simple SQLite database which records each of
-  the relevant details from the WorkItem instances. If the uploader is
-  resumed, then data is replayed out of the database.
+class _Database(object):
+  """Base class for database connections in this module.
+
+  The table is created by a primary thread (the Python main thread)
+  but all future lookups and updates are performed by a secondary
+  thread.
   """
 
-  def __init__(self, db_filename, commit_periodicity=100):
-    """Initialize the ProgressDatabase instance.
+  SIGNATURE_TABLE_NAME = 'bulkloader_database_signature'
+
+  def __init__(self,
+               db_filename,
+               create_table,
+               signature,
+               index=None,
+               commit_periodicity=100):
+    """Initialize the _Database instance.
 
     Args:
-      db_filename: The name of the SQLite database to use.
-      commit_periodicity: How many operations to perform between commits.
+      db_filename: The sqlite3 file to use for the database.
+      create_table: A string containing the SQL table creation command.
+      signature: A string identifying the important invocation options,
+        used to make sure we are not using an old database.
+      index: An optional string to create an index for the database.
+      commit_periodicity: Number of operations between database commits.
     """
     self.db_filename = db_filename
 
-    logging.info('Using progress database: %s', db_filename)
+    logger.info('Opening database: %s', db_filename)
     self.primary_conn = sqlite3.connect(db_filename, isolation_level=None)
     self.primary_thread = threading.currentThread()
 
-    self.progress_conn = None
-    self.progress_thread = None
+    self.secondary_conn = None
+    self.secondary_thread = None
 
     self.operation_count = 0
     self.commit_periodicity = commit_periodicity
 
-    self.prior_key_end = None
-
     try:
-      self.primary_conn.execute(
-          """create table progress (
-          id integer primary key autoincrement,
-          state integer not null,
-          key_start integer not null,
-          key_end integer not null
-          )
-          """)
+      self.primary_conn.execute(create_table)
     except sqlite3.OperationalError, e:
       if 'already exists' not in e.message:
         raise
 
+    if index:
+      try:
+        self.primary_conn.execute(index)
+      except sqlite3.OperationalError, e:
+        if 'already exists' not in e.message:
+          raise
+
+    self.existing_table = False
+    signature_cursor = self.primary_conn.cursor()
+    create_signature = """
+      create table %s (
+      value TEXT not null)
+    """ % _Database.SIGNATURE_TABLE_NAME
     try:
-      self.primary_conn.execute('create index i_state on progress (state)')
+      self.primary_conn.execute(create_signature)
+      self.primary_conn.cursor().execute(
+          'insert into %s (value) values (?)' % _Database.SIGNATURE_TABLE_NAME,
+          (signature,))
     except sqlite3.OperationalError, e:
       if 'already exists' not in e.message:
+        logger.exception('Exception creating table:')
         raise
+      else:
+        self.existing_table = True
+        signature_cursor.execute(
+            'select * from %s' % _Database.SIGNATURE_TABLE_NAME)
+        (result,) = signature_cursor.fetchone()
+        if result and result != signature:
+          logger.error('Database signature mismatch:\n\n'
+                       'Found:\n'
+                       '%s\n\n'
+                       'Expecting:\n'
+                       '%s\n',
+                       result, signature)
+          raise ResumeError('Database signature mismatch: %s != %s' % (
+                            signature, result))
 
   def ThreadComplete(self):
-    """Finalize any operations the progress thread has performed.
+    """Finalize any operations the secondary thread has performed.
 
     The database aggregates lots of operations into a single commit, and
     this method is used to commit any pending operations as the thread
     is about to shut down.
     """
-    if self.progress_conn:
+    if self.secondary_conn:
       self._MaybeCommit(force_commit=True)
 
   def _MaybeCommit(self, force_commit=False):
@@ -1573,28 +2496,175 @@
     """
     self.operation_count += 1
     if force_commit or (self.operation_count % self.commit_periodicity) == 0:
-      self.progress_conn.commit()
-
-  def _OpenProgressConnection(self):
-    """Possibly open a database connection for the progress tracker thread.
+      self.secondary_conn.commit()
+
+  def _OpenSecondaryConnection(self):
+    """Possibly open a database connection for the secondary thread.
 
     If the connection is not open (for the calling thread, which is assumed
-    to be the progress tracker thread), then open it. We also open a couple
+    to be the unique secondary thread), then open it. We also open a couple
     cursors for later use (and reuse).
     """
-    if self.progress_conn:
+    if self.secondary_conn:
       return
 
     assert not _RunningInThread(self.primary_thread)
 
-    self.progress_thread = threading.currentThread()
-
-    self.progress_conn = sqlite3.connect(self.db_filename)
-
-    self.insert_cursor = self.progress_conn.cursor()
-    self.update_cursor = self.progress_conn.cursor()
-
-  def HasUnfinishedWork(self):
+    self.secondary_thread = threading.currentThread()
+
+    self.secondary_conn = sqlite3.connect(self.db_filename)
+
+    self.insert_cursor = self.secondary_conn.cursor()
+    self.update_cursor = self.secondary_conn.cursor()
+
+
+class ResultDatabase(_Database):
+  """Persistently record all the entities downloaded during an export.
+
+  The entities are held in the database by their unique datastore key
+  in order to avoid duplication if an export is restarted.
+  """
+
+  def __init__(self, db_filename, signature, commit_periodicity=1):
+    """Initialize a ResultDatabase object.
+
+    Args:
+      db_filename: The name of the SQLite database to use.
+      signature: A string identifying the important invocation options,
+        used to make sure we are not using an old database.
+      commit_periodicity: How many operations to perform between commits.
+    """
+    self.complete = False
+    create_table = ('create table result (\n'
+                    'id TEXT primary key,\n'
+                    'value BLOB not null)')
+
+    _Database.__init__(self,
+                       db_filename,
+                       create_table,
+                       signature,
+                       commit_periodicity=commit_periodicity)
+    if self.existing_table:
+      cursor = self.primary_conn.cursor()
+      cursor.execute('select count(*) from result')
+      self.existing_count = int(cursor.fetchone()[0])
+    else:
+      self.existing_count = 0
+    self.count = self.existing_count
+
+  def _StoreEntity(self, entity_id, value):
+    """Store an entity in the result database.
+
+    Args:
+      entity_id: A db.Key for the entity.
+      value: A string of the contents of the entity.
+
+    Returns:
+      True if this entity was not already present in the result database.
+    """
+
+    assert _RunningInThread(self.secondary_thread)
+    assert isinstance(entity_id, db.Key)
+
+    entity_id = entity_id.id_or_name()
+    self.insert_cursor.execute(
+        'select count(*) from result where id = ?', (unicode(entity_id),))
+    already_present = self.insert_cursor.fetchone()[0]
+    result = True
+    if already_present:
+      result = False
+      self.insert_cursor.execute('delete from result where id = ?',
+                                 (unicode(entity_id),))
+    else:
+      self.count += 1
+    self.insert_cursor.execute(
+        'insert into result (id, value) values (?, ?)',
+        (unicode(entity_id), buffer(value)))
+    return result
+
+  def StoreEntities(self, keys, entities):
+    """Store a group of entities in the result database.
+
+    Args:
+      keys: A list of entity keys.
+      entities: A list of entities.
+
+    Returns:
+      The number of new entities stored in the result database.
+    """
+    self._OpenSecondaryConnection()
+    t = time.time()
+    count = 0
+    for entity_id, value in zip(keys, entities):
+      if self._StoreEntity(entity_id, value):
+        count += 1
+    logger.debug('%s insert: delta=%.3f',
+                 self.db_filename,
+                 time.time() - t)
+    logger.debug('Entities transferred total: %s', self.count)
+    self._MaybeCommit()
+    return count
+
+  def ResultsComplete(self):
+    """Marks the result database as containing complete results."""
+    self.complete = True
+
+  def AllEntities(self):
+    """Yields all pairs of (id, value) from the result table."""
+    conn = sqlite3.connect(self.db_filename, isolation_level=None)
+    cursor = conn.cursor()
+
+    cursor.execute(
+        'select id, value from result order by id')
+
+    for unused_entity_id, entity in cursor:
+      yield cPickle.loads(str(entity))
+
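A condensed, standalone sketch (not SDK code) of the keep-one-row-per-key behaviour implemented by _StoreEntity above: re-downloading a key replaces the stored value rather than duplicating it.

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('create table result (id TEXT primary key, value BLOB not null)')

    def store(entity_id, value):
      cursor = conn.cursor()
      cursor.execute('select count(*) from result where id = ?', (entity_id,))
      if cursor.fetchone()[0]:
        cursor.execute('delete from result where id = ?', (entity_id,))
      cursor.execute('insert into result (id, value) values (?, ?)',
                     (entity_id, buffer(value)))

    store(u'greeting:1', 'version 1')
    store(u'greeting:1', 'version 2')      # replaces the earlier row
    assert conn.execute('select count(*) from result').fetchone()[0] == 1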
+
+class _ProgressDatabase(_Database):
+  """Persistently record all progress information during an upload.
+
+  This class wraps a very simple SQLite database which records each of
+  the relevant details from a chunk of work. If the loader is
+  resumed, then data is replayed out of the database.
+  """
+
+  def __init__(self,
+               db_filename,
+               sql_type,
+               py_type,
+               signature,
+               commit_periodicity=100):
+    """Initialize the ProgressDatabase instance.
+
+    Args:
+      db_filename: The name of the SQLite database to use.
+      sql_type: A string of the SQL type to use for entity keys.
+      py_type: The python type of entity keys.
+      signature: A string identifying the important invocation options,
+        used to make sure we are not using an old database.
+      commit_periodicity: How many operations to perform between commits.
+    """
+    self.prior_key_end = None
+
+    create_table = ('create table progress (\n'
+                    'id integer primary key autoincrement,\n'
+                    'state integer not null,\n'
+                    'key_start %s,\n'
+                    'key_end %s)'
+                    % (sql_type, sql_type))
+    self.py_type = py_type
+
+    index = 'create index i_state on progress (state)'
+    _Database.__init__(self,
+                       db_filename,
+                       create_table,
+                       signature,
+                       index=index,
+                       commit_periodicity=commit_periodicity)
+
+  def UseProgressData(self):
     """Returns True if the database has progress information.
 
     Note there are two basic cases for progress information:
@@ -1605,10 +2675,10 @@
        data.
 
     Returns:
-      True if the database has progress information, False otherwise.
+      True: if the database has progress information.
 
     Raises:
-      ResumeError: If there is an error reading the progress database.
+      ResumeError: if there is an error retrieving rows from the database.
     """
     assert _RunningInThread(self.primary_thread)
 
@@ -1616,7 +2686,7 @@
     cursor.execute('select count(*) from progress')
     row = cursor.fetchone()
     if row is None:
-      raise ResumeError('Error reading progress information.')
+      raise ResumeError('Cannot retrieve progress information from database.')
 
     return row[0] != 0
 
@@ -1642,20 +2712,18 @@
     Returns:
       A string to later be used as a unique key to update this state.
     """
-    self._OpenProgressConnection()
-
-    assert _RunningInThread(self.progress_thread)
-    assert isinstance(key_start, int)
-    assert isinstance(key_end, int)
-    assert key_start <= key_end
-
-    if self.prior_key_end is not None:
-      assert key_start > self.prior_key_end
-    self.prior_key_end = key_end
+    self._OpenSecondaryConnection()
+
+    assert _RunningInThread(self.secondary_thread)
+    assert not key_start or isinstance(key_start, self.py_type)
+    assert not key_end or isinstance(key_end, self.py_type), '%s is a %s' % (
+        key_end, key_end.__class__)
+    assert KeyLEQ(key_start, key_end), '%s not less than %s' % (
+        repr(key_start), repr(key_end))
 
     self.insert_cursor.execute(
         'insert into progress (state, key_start, key_end) values (?, ?, ?)',
-        (STATE_READ, key_start, key_end))
+        (STATE_READ, unicode(key_start), unicode(key_end)))
 
     progress_key = self.insert_cursor.lastrowid
 
@@ -1670,9 +2738,9 @@
       key: The key for this progress record, returned from StoreKeys
       new_state: The new state to associate with this progress record.
     """
-    self._OpenProgressConnection()
-
-    assert _RunningInThread(self.progress_thread)
+    self._OpenSecondaryConnection()
+
+    assert _RunningInThread(self.secondary_thread)
     assert isinstance(new_state, int)
 
     self.update_cursor.execute('update progress set state=? where id=?',
@@ -1680,8 +2748,22 @@
 
     self._MaybeCommit()
 
+  def DeleteKey(self, progress_key):
+    """Delete the entities with the given key from the result database."""
+    self._OpenSecondaryConnection()
+
+    assert _RunningInThread(self.secondary_thread)
+
+    t = time.time()
+    self.insert_cursor.execute(
+        'delete from progress where rowid = ?', (progress_key,))
+
+    logger.debug('delete: delta=%.3f', time.time() - t)
+
+    self._MaybeCommit()
+
   def GetProgressStatusGenerator(self):
-    """Get a generator which returns progress information.
+    """Get a generator which yields progress information.
 
     The returned generator will yield a series of 4-tuples that specify
     progress information about a prior run of the uploader. The 4-tuples
@@ -1706,16 +2788,19 @@
     The caller should begin uploading records which occur after key_end.
 
     Yields:
-      Progress information as tuples (progress_key, state, key_start, key_end).
+      Four-tuples of (progress_key, state, key_start, key_end)
     """
     conn = sqlite3.connect(self.db_filename, isolation_level=None)
     cursor = conn.cursor()
 
-    cursor.execute('select max(id) from progress')
-    batch_id = cursor.fetchone()[0]
-
-    cursor.execute('select key_end from progress where id = ?', (batch_id,))
-    key_end = cursor.fetchone()[0]
+    cursor.execute('select max(key_end) from progress')
+
+    result = cursor.fetchone()
+    if result is not None:
+      key_end = result[0]
+    else:
+      logger.debug('No rows in progress database.')
+      return
 
     self.prior_key_end = key_end
 
@@ -1730,16 +2815,43 @@
     for row in rows:
       if row is None:
         break
-
-      yield row
+      progress_key, state, key_start, key_end = row
+
+      yield progress_key, state, key_start, key_end
 
     yield None, DATA_CONSUMED_TO_HERE, None, key_end
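
A rough sketch of the consumer side may help here. The data source thread is handed this generator when a prior run is resumed; a simplified consumer (the helper name below is invented) would look roughly like:

    # Hypothetical consumer of GetProgressStatusGenerator(); the real code
    # feeds these tuples back into the work queue rather than printing them.
    def ReplayProgress(progress_db):
      for progress_key, state, key_start, key_end in (
          progress_db.GetProgressStatusGenerator()):
        if state == DATA_CONSUMED_TO_HERE:
          return key_end  # resume reading fresh data after this key
        print 'unfinished range %s..%s (state %s)' % (key_start, key_end, state)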
 
 
+def ProgressDatabase(db_filename, signature):
+  """Returns a database to store upload progress information."""
+  return _ProgressDatabase(db_filename, 'INTEGER', int, signature)
+
+
+class ExportProgressDatabase(_ProgressDatabase):
+  """A database to store download progress information."""
+
+  def __init__(self, db_filename, signature):
+    """Initialize an ExportProgressDatabase."""
+    _ProgressDatabase.__init__(self,
+                               db_filename,
+                               'TEXT',
+                               db.Key,
+                               signature,
+                               commit_periodicity=1)
+
+  def UseProgressData(self):
+    """Check if the progress database contains progress data.
+
+    Returns:
+      True: if the database contains progress data.
+    """
+    return self.existing_table
+
+
 class StubProgressDatabase(object):
   """A stub implementation of ProgressDatabase which does nothing."""
 
-  def HasUnfinishedWork(self):
+  def UseProgressData(self):
     """Whether the stub database has progress information (it doesn't)."""
     return False
 
@@ -1756,7 +2868,7 @@
     pass
 
 
-class ProgressTrackerThread(_ThreadBase):
+class _ProgressThreadBase(_ThreadBase):
   """A thread which records progress information for the upload process.
 
   The progress information is stored into the provided progress database.
@@ -1779,7 +2891,23 @@
 
     self.progress_queue = progress_queue
     self.db = progress_db
-    self.entities_sent = 0
+    self.entities_transferred = 0
+
+  def EntitiesTransferred(self):
+    """Return the total number of unique entities transferred."""
+    return self.entities_transferred
+
+  def UpdateProgress(self, item):
+    """Updates the progress information for the given item.
+
+    Args:
+      item: A work item whose new state will be recorded
+    """
+    raise NotImplementedError()
+
+  def WorkFinished(self):
+    """Performs final actions after the entity transfer is complete."""
+    raise NotImplementedError()
 
   def PerformWork(self):
     """Performs the work of a ProgressTrackerThread."""
@@ -1795,10 +2923,7 @@
         item.progress_key = self.db.StoreKeys(item.key_start, item.key_end)
       else:
         assert item.progress_key is not None
-
-        self.db.UpdateState(item.progress_key, item.state)
-        if item.state == STATE_SENT:
-          self.entities_sent += item.count
+        self.UpdateProgress(item)
 
       item.progress_event.set()
 
@@ -1808,6 +2933,106 @@
 
 
 
+class ProgressTrackerThread(_ProgressThreadBase):
+  """A thread which records progress information for the upload process.
+
+  The progress information is stored into the provided progress database.
+  This class is not responsible for replaying a prior run's progress
+  information out of the database. Separate mechanisms must be used to
+  resume a prior upload attempt.
+  """
+  NAME = 'progress tracking thread'
+
+  def __init__(self, progress_queue, progress_db):
+    """Initialize the ProgressTrackerThread instance.
+
+    Args:
+      progress_queue: A Queue used for tracking progress information.
+      progress_db: The database for tracking progress information; should
+        be an instance of ProgressDatabase.
+    """
+    _ProgressThreadBase.__init__(self, progress_queue, progress_db)
+
+  def UpdateProgress(self, item):
+    """Update the state of the given WorkItem.
+
+    Args:
+      item: A WorkItem instance.
+    """
+    self.db.UpdateState(item.progress_key, item.state)
+    if item.state == STATE_SENT:
+      self.entities_transferred += item.count
+
+  def WorkFinished(self):
+    """Performs final actions after the entity transfer is complete."""
+    pass
+
+
+class ExportProgressThread(_ProgressThreadBase):
+  """A thread to record progress information and write record data for exports.
+
+  The progress information is stored into a provided progress database.
+  Exported results are stored in the result database and dumped to an output
+  file at the end of the download.
+  """
+
+  def __init__(self, kind, progress_queue, progress_db, result_db):
+    """Initialize the ExportProgressThread instance.
+
+    Args:
+      kind: The kind of entities being stored in the database.
+      progress_queue: A Queue used for tracking progress information.
+      progress_db: The database for tracking progress information; should
+        be an instance of ProgressDatabase.
+      result_db: The database for holding exported entities; should be an
+        instance of ResultDatabase.
+    """
+    _ProgressThreadBase.__init__(self, progress_queue, progress_db)
+
+    self.kind = kind
+    self.existing_count = result_db.existing_count
+    self.result_db = result_db
+
+  def EntitiesTransferred(self):
+    """Return the total number of unique entities transferred."""
+    return self.result_db.count
+
+  def WorkFinished(self):
+    """Write the contents of the result database."""
+    exporter = Exporter.RegisteredExporter(self.kind)
+    exporter.output_entities(self.result_db.AllEntities())
+
+  def UpdateProgress(self, item):
+    """Update the state of the given KeyRange.
+
+    Args:
+      item: A KeyRange instance.
+    """
+    if item.state == STATE_GOT:
+      count = self.result_db.StoreEntities(item.export_result.keys,
+                                           item.export_result.entities)
+      self.db.DeleteKey(item.progress_key)
+      self.entities_transferred += count
+    else:
+      self.db.UpdateState(item.progress_key, item.state)
+
+
+def ParseKey(key_string):
+  """Turn a key stored in the database into a db.Key or None.
+
+  Args:
+    key_string: The string representation of a db.Key.
+
+  Returns:
+    A db.Key instance or None
+  """
+  if not key_string:
+    return None
+  if key_string == 'None':
+    return None
+  return db.Key(encoded=key_string)
+
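A small illustration of the round trip (the 'Greeting' kind is invented, and an App Engine environment with APPLICATION_ID set is assumed):

    # Progress rows store unicode(key); ParseKey turns that text back into a key.
    key = db.Key.from_path('Greeting', 42)
    assert ParseKey(unicode(key)) == key
    assert ParseKey('') is None and ParseKey('None') is None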
+
 def Validate(value, typ):
   """Checks that value is non-empty and of the right type.
 
@@ -1816,9 +3041,8 @@
     typ: a type or tuple of types
 
   Raises:
-    ValueError if value is None or empty.
-    TypeError if it's not the given type.
-
+    ValueError: if value is None or empty.
+    TypeError: if it's not the given type.
   """
   if not value:
     raise ValueError('Value should not be empty; received %s.' % value)
@@ -1827,6 +3051,22 @@
                     (typ, value, value.__class__))
 
 
+def CheckFile(filename):
+  """Check that the given file exists and can be opened for reading.
+
+  Args:
+    filename: The name of the file.
+
+  Raises:
+    FileNotFoundError: if the given filename is not found.
+    FileNotReadableError: if the given filename is not readable.
+  """
+  if not os.path.exists(filename):
+    raise FileNotFoundError('%s: file not found' % filename)
+  elif not os.access(filename, os.R_OK):
+    raise FileNotReadableError('%s: file not readable' % filename)
+
+
 class Loader(object):
   """A base class for creating datastore entities from input data.
 
@@ -1836,14 +3076,14 @@
 
   If you need to run extra code to convert entities from the input
   data, create new properties, or otherwise modify the entities before
-  they're inserted, override HandleEntity.
-
-  See the CreateEntity method for the creation of entities from the
+  they're inserted, override handle_entity.
+
+  See the create_entity method for the creation of entities from the
   (parsed) input data.
   """
 
   __loaders = {}
-  __kind = None
+  kind = None
   __properties = None
 
   def __init__(self, kind, properties):
@@ -1858,11 +3098,11 @@
 
       properties: list of (name, converter) tuples.
 
-        This is used to automatically convert the CSV columns into
+        This is used to automatically convert the input columns into
         properties.  The converter should be a function that takes one
-        argument, a string value from the CSV file, and returns a
+        argument, a string value from the input file, and returns a
         correctly typed property value that should be inserted. The
-        tuples in this list should match the columns in your CSV file,
+        tuples in this list should match the columns in your input file,
         in order.
 
         For example:
@@ -1874,10 +3114,12 @@
            ('description', datastore_types.Text),
            ]
     """
-    Validate(kind, basestring)
-    self.__kind = kind
-
-    db.class_for_kind(kind)
+    Validate(kind, (basestring, tuple))
+    self.kind = kind
+    self.__openfile = open
+    self.__create_csv_reader = csv.reader
+
+    GetImplementationClass(kind)
 
     Validate(properties, list)
     for name, fn in properties:
@@ -1890,49 +3132,66 @@
   @staticmethod
   def RegisterLoader(loader):
 
-    Loader.__loaders[loader.__kind] = loader
-
-  def kind(self):
-    """ Return the entity kind that this Loader handes.
-    """
-    return self.__kind
-
-  def CreateEntity(self, values, key_name=None):
+    Loader.__loaders[loader.kind] = loader
+
+  def alias_old_names(self):
+    """Aliases method names so that Loaders defined with old names work."""
+    aliases = (
+        ('CreateEntity', 'create_entity'),
+        ('HandleEntity', 'handle_entity'),
+        ('GenerateKey', 'generate_key'),
+        )
+    for old_name, new_name in aliases:
+      setattr(Loader, old_name, getattr(Loader, new_name))
+      if hasattr(self.__class__, old_name) and not (
+          getattr(self.__class__, old_name).im_func ==
+          getattr(Loader, new_name).im_func):
+        if hasattr(self.__class__, new_name) and not (
+            getattr(self.__class__, new_name).im_func ==
+            getattr(Loader, new_name).im_func):
+          raise NameClashError(old_name, new_name, self.__class__)
+        setattr(self, new_name, getattr(self, old_name))
+
+  def create_entity(self, values, key_name=None, parent=None):
     """Creates a entity from a list of property values.
 
     Args:
       values: list/tuple of str
       key_name: if provided, the name for the (single) resulting entity
+      parent: A db.Key instance for the parent, or None
 
     Returns:
       list of db.Model
 
       The returned entities are populated with the property values from the
       argument, converted to native types using the properties map given in
-      the constructor, and passed through HandleEntity. They're ready to be
+      the constructor, and passed through handle_entity. They're ready to be
       inserted.
 
     Raises:
-      AssertionError if the number of values doesn't match the number
+      AssertionError: if the number of values doesn't match the number
         of properties in the properties map.
-      ValueError if any element of values is None or empty.
-      TypeError if values is not a list or tuple.
+      ValueError: if any element of values is None or empty.
+      TypeError: if values is not a list or tuple.
     """
     Validate(values, (list, tuple))
     assert len(values) == len(self.__properties), (
-      'Expected %d CSV columns, found %d.' %
-      (len(self.__properties), len(values)))
-
-    model_class = db.class_for_kind(self.__kind)
-
-    properties = {'key_name': key_name}
+        'Expected %d columns, found %d.' %
+        (len(self.__properties), len(values)))
+
+    model_class = GetImplementationClass(self.kind)
+
+    properties = {
+        'key_name': key_name,
+        'parent': parent,
+        }
     for (name, converter), val in zip(self.__properties, values):
       if converter is bool and val.lower() in ('0', 'false', 'no'):
-          val = False
+        val = False
       properties[name] = converter(val)
 
     entity = model_class(**properties)
-    entities = self.HandleEntity(entity)
+    entities = self.handle_entity(entity)
 
     if entities:
       if not isinstance(entities, (list, tuple)):
@@ -1945,7 +3204,7 @@
 
     return entities
 
-  def GenerateKey(self, i, values):
+  def generate_key(self, i, values):
     """Generates a key_name to be used in creating the underlying object.
 
     The default implementation returns None.
@@ -1953,8 +3212,9 @@
     This method can be overridden to control the key generation for
     uploaded entities. The value returned should be None (to use a
     server generated numeric key), or a string which neither starts
-    with a digit nor has the form __*__. (See
-    http://code.google.com/appengine/docs/python/datastore/keysandentitygroups.html)
+    with a digit nor has the form __*__ (see
+    http://code.google.com/appengine/docs/python/datastore/keysandentitygroups.html),
+    or a db.Key instance.
 
     If you generate your own string keys, keep in mind:
 
@@ -1972,16 +3232,16 @@
     """
     return None
 
-  def HandleEntity(self, entity):
+  def handle_entity(self, entity):
     """Subclasses can override this to add custom entity conversion code.
 
-    This is called for each entity, after its properties are populated from
-    CSV but before it is stored. Subclasses can override this to add custom
-    entity handling code.
-
-    The entity to be inserted should be returned. If multiple entities should
-    be inserted, return a list of entities. If no entities should be inserted,
-    return None or [].
+    This is called for each entity, after its properties are populated
+    from the input but before it is stored. Subclasses can override
+    this to add custom entity handling code.
+
+    The entity to be inserted should be returned. If multiple entities
+    should be inserted, return a list of entities. If no entities
+    should be inserted, return None or [].
 
     Args:
       entity: db.Model
@@ -1991,12 +3251,213 @@
     """
     return entity
 
+  def initialize(self, filename, loader_opts):
+    """Performs initialization and validation of the input file.
+
+    This implementation checks that the input file exists and can be
+    opened for reading.
+
+    Args:
+      filename: The string given as the --filename flag argument.
+      loader_opts: The string given as the --loader_opts flag argument.
+    """
+    CheckFile(filename)
+
+  def finalize(self):
+    """Performs finalization actions after the upload completes."""
+    pass
+
+  def generate_records(self, filename):
+    """Subclasses can override this to add custom data input code.
+
+    This method must yield fixed-length lists of strings.
+
+    The default implementation uses csv.reader to read CSV rows
+    from filename.
+
+    Args:
+      filename: The string input for the --filename option.
+
+    Yields:
+      Lists of strings.
+    """
+    csv_generator = CSVGenerator(filename, openfile=self.__openfile,
+                                 create_csv_reader=self.__create_csv_reader
+                                ).Records()
+    return csv_generator
 
   @staticmethod
   def RegisteredLoaders():
-    """Returns a list of the Loader instances that have been created.
+    """Returns a dict of the Loader instances that have been created."""
+    return dict(Loader.__loaders)
+
+  @staticmethod
+  def RegisteredLoader(kind):
+    """Returns the loader instance for the given kind if it exists."""
+    return Loader.__loaders[kind]
+
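To make the subclassing contract concrete, here is a minimal illustrative Loader; the 'Greeting' kind and its properties are invented, and the sketch assumes a matching Greeting db.Model is importable so the kind resolves:

    import datetime

    class GreetingLoader(Loader):
      def __init__(self):
        Loader.__init__(self, 'Greeting',
                        [('author', str),
                         ('content', str),
                         ('date',
                          lambda x: datetime.datetime.strptime(x, '%Y-%m-%d')),
                        ])

      def generate_key(self, i, values):
        # Build a stable string key_name from the row number.
        return 'greeting%d' % i

      def handle_entity(self, entity):
        # Skip rows with empty content instead of uploading them.
        if not entity.content:
          return None
        return entity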
+
+class Exporter(object):
+  """A base class for serializing datastore entities.
+
+  To add a handler for exporting an entity kind from your datastore,
+  write a subclass of this class that calls Exporter.__init__ from your
+  class's __init__.
+
+  If you need extra control over how individual properties are turned
+  into strings, supply your own converter functions in the properties
+  list passed to Exporter.__init__.
+
+  See the output_entities method for the writing of data from entities.
+  """
+
+  __exporters = {}
+  kind = None
+  __properties = None
+
+  def __init__(self, kind, properties):
+    """Constructor.
+
+    Populates this Exporter's kind and properties map. Also registers
+    it so that all you need to do is instantiate your Exporter, and
+    the bulkload handler will automatically use it.
+
+    Args:
+      kind: a string containing the entity kind that this exporter handles
+
+      properties: list of (name, converter, default) tuples.
+
+      This is used to automatically convert the entities to strings.
+      The converter should be a function that takes one argument, a property
+      value of the appropriate type, and returns a str or unicode.  The default
+      is a string to be used if the property is not present, or None to fail
+      with an error if the property is missing.
+
+      For example:
+        [('name', str, None),
+         ('id_number', str, None),
+         ('email', str, ''),
+         ('user', str, None),
+         ('birthdate',
+          lambda x: str(datetime.datetime.fromtimestamp(float(x))),
+          None),
+         ('description', str, ''),
+         ]
     """
-    return dict(Loader.__loaders)
+    Validate(kind, basestring)
+    self.kind = kind
+
+    GetImplementationClass(kind)
+
+    Validate(properties, list)
+    for name, fn, default in properties:
+      Validate(name, basestring)
+      assert callable(fn), (
+          'Conversion function %s for property %s is not callable.' % (
+              fn, name))
+      if default:
+        Validate(default, basestring)
+
+    self.__properties = properties
+
+  @staticmethod
+  def RegisterExporter(exporter):
+
+    Exporter.__exporters[exporter.kind] = exporter
+
+  def __ExtractProperties(self, entity):
+    """Converts an entity into a list of string values.
+
+    Args:
+      entity: An entity to extract the properties from.
+
+    Returns:
+      A list of the properties of the entity.
+
+    Raises:
+      MissingPropertyError: if an expected field on the entity is missing.
+    """
+    encoding = []
+    for name, fn, default in self.__properties:
+      try:
+        encoding.append(fn(getattr(entity, name)))
+      except AttributeError:
+        if default is None:
+          raise MissingPropertyError(name)
+        else:
+          encoding.append(default)
+    return encoding
+
+  def __EncodeEntity(self, entity):
+    """Convert the given entity into CSV string.
+
+    Args:
+      entity: The entity to encode.
+
+    Returns:
+      A CSV string.
+    """
+    output = StringIO.StringIO()
+    writer = csv.writer(output, lineterminator='')
+    writer.writerow(self.__ExtractProperties(entity))
+    return output.getvalue()
+
+  def __SerializeEntity(self, entity):
+    """Creates a string representation of an entity.
+
+    Args:
+      entity: The entity to serialize.
+
+    Returns:
+      A serialized representation of an entity.
+    """
+    encoding = self.__EncodeEntity(entity)
+    if not isinstance(encoding, unicode):
+      encoding = unicode(encoding, 'utf-8')
+    encoding = encoding.encode('utf-8')
+    return encoding
+
+  def output_entities(self, entity_generator):
+    """Outputs the downloaded entities.
+
+    This implementation writes CSV.
+
+    Args:
+      entity_generator: A generator that yields the downloaded entities
+        in key order.
+    """
+    CheckOutputFile(self.output_filename)
+    output_file = open(self.output_filename, 'w')
+    logger.debug('Export complete, writing to file')
+    output_file.writelines(self.__SerializeEntity(entity) + '\n'
+                           for entity in entity_generator)
+
+  def initialize(self, filename, exporter_opts):
+    """Performs initialization and validation of the output file.
+
+    This implementation checks that the output file does not yet exist
+    and that its location is writable.
+
+    Args:
+      filename: The string given as the --filename flag argument.
+      exporter_opts: The string given as the --exporter_opts flag argument.
+    """
+    CheckOutputFile(filename)
+    self.output_filename = filename
+
+  def finalize(self):
+    """Performs finalization actions after the download completes."""
+    pass
+
+  @staticmethod
+  def RegisteredExporters():
+    """Returns a dictionary of the exporter instances that have been created."""
+    return dict(Exporter.__exporters)
+
+  @staticmethod
+  def RegisteredExporter(kind):
+    """Returns an exporter instance for the given kind if it exists."""
+    return Exporter.__exporters[kind]
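
By way of illustration, a minimal Exporter for the same invented 'Greeting' kind, showing the (name, converter, default) contract; a matching Greeting db.Model is assumed to be importable:

    class GreetingExporter(Exporter):
      def __init__(self):
        Exporter.__init__(self, 'Greeting',
                          [('author', str, None),   # None: error if missing
                           ('content', str, ''),    # '': default if missing
                           ('date', str, None),
                          ])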
 
 
 class QueueJoinThread(threading.Thread):
@@ -2045,7 +3506,7 @@
     if not thread.isAlive():
       return True
     if thread_local.shut_down:
-      logging.debug('Queue join interrupted')
+      logger.debug('Queue join interrupted')
       return False
     for worker_thread in thread_gate.Threads():
       if not worker_thread.isAlive():
@@ -2060,7 +3521,7 @@
     work_queue: The work queue.
     thread_gate: A ThreadGate instance with workers registered.
   """
-  logging.info('An error occurred. Shutting down...')
+  logger.info('An error occurred. Shutting down...')
 
   data_source_thread.exit_flag = True
 
@@ -2072,8 +3533,8 @@
 
   data_source_thread.join(timeout=3.0)
   if data_source_thread.isAlive():
-    logging.warn('%s hung while trying to exit',
-                 data_source_thread.GetFriendlyName())
+    logger.warn('%s hung while trying to exit',
+                data_source_thread.GetFriendlyName())
 
   while not work_queue.empty():
     try:
@@ -2083,175 +3544,255 @@
       pass
 
 
-def PerformBulkUpload(app_id,
-                      post_url,
-                      kind,
-                      workitem_generator_factory,
-                      num_threads,
-                      throttle,
-                      progress_db,
-                      max_queue_size=DEFAULT_QUEUE_SIZE,
-                      request_manager_factory=RequestManager,
-                      bulkloaderthread_factory=BulkLoaderThread,
-                      progresstrackerthread_factory=ProgressTrackerThread,
-                      datasourcethread_factory=DataSourceThread,
-                      work_queue_factory=ReQueue,
-                      progress_queue_factory=Queue.Queue):
-  """Uploads data into an application using a series of HTTP POSTs.
-
-  This function will spin up a number of threads to read entities from
-  the data source, pass those to a number of worker ("uploader") threads
-  for sending to the application, and track all of the progress in a
-  small database in case an error or pause/termination requires a
-  restart/resumption of the upload process.
-
-  Args:
-    app_id: String containing application id.
-    post_url: URL to post the Entity data to.
-    kind: Kind of the Entity records being posted.
-    workitem_generator_factory: A factory that creates a WorkItem generator.
-    num_threads: How many uploader threads should be created.
-    throttle: A Throttle instance.
-    progress_db: The database to use for replaying/recording progress.
-    max_queue_size: Maximum size of the queues before they should block.
-    request_manager_factory: Used for dependency injection.
-    bulkloaderthread_factory: Used for dependency injection.
-    progresstrackerthread_factory: Used for dependency injection.
-    datasourcethread_factory: Used for dependency injection.
-    work_queue_factory: Used for dependency injection.
-    progress_queue_factory: Used for dependency injection.
-
-  Raises:
-    AuthenticationError: If authentication is required and fails.
-  """
-  thread_gate = ThreadGate(True)
-
-  (unused_scheme,
-   host_port, url_path,
-   unused_query, unused_fragment) = urlparse.urlsplit(post_url)
-
-  work_queue = work_queue_factory(max_queue_size)
-  progress_queue = progress_queue_factory(max_queue_size)
-  request_manager = request_manager_factory(app_id,
-                                            host_port,
-                                            url_path,
-                                            kind,
-                                            throttle)
-
-  throttle.Register(threading.currentThread())
-  try:
-    request_manager.Authenticate()
-  except Exception, e:
-    logging.exception(e)
-    raise AuthenticationError('Authentication failed')
-  if (request_manager.credentials is not None and
-      not request_manager.authenticated):
-    raise AuthenticationError('Authentication failed')
-
-  for unused_idx in range(num_threads):
-    thread = bulkloaderthread_factory(work_queue,
-                                      throttle,
-                                      thread_gate,
-                                      request_manager)
-    throttle.Register(thread)
-    thread_gate.Register(thread)
-
-  progress_thread = progresstrackerthread_factory(progress_queue, progress_db)
-
-  if progress_db.HasUnfinishedWork():
-    logging.debug('Restarting upload using progress database')
-    progress_generator_factory = progress_db.GetProgressStatusGenerator
-  else:
-    progress_generator_factory = None
-
-  data_source_thread = datasourcethread_factory(work_queue,
-                                                progress_queue,
-                                                workitem_generator_factory,
-                                                progress_generator_factory)
-
-  thread_local = threading.local()
-  thread_local.shut_down = False
-
-  def Interrupt(unused_signum, unused_frame):
-    """Shutdown gracefully in response to a signal."""
-    thread_local.shut_down = True
-
-  signal.signal(signal.SIGINT, Interrupt)
-
-  progress_thread.start()
-  data_source_thread.start()
-  for thread in thread_gate.Threads():
-    thread.start()
-
-
-  while not thread_local.shut_down:
-    data_source_thread.join(timeout=0.25)
-
-    if data_source_thread.isAlive():
-      for thread in list(thread_gate.Threads()) + [progress_thread]:
-        if not thread.isAlive():
-          logging.info('Unexpected thread death: %s', thread.getName())
-          thread_local.shut_down = True
-          break
+class BulkTransporterApp(object):
+  """Class to wrap bulk transport application functionality."""
+
+  def __init__(self,
+               arg_dict,
+               input_generator_factory,
+               throttle,
+               progress_db,
+               workerthread_factory,
+               progresstrackerthread_factory,
+               max_queue_size=DEFAULT_QUEUE_SIZE,
+               request_manager_factory=RequestManager,
+               datasourcethread_factory=DataSourceThread,
+               work_queue_factory=ReQueue,
+               progress_queue_factory=Queue.Queue):
+    """Instantiate a BulkTransporterApp.
+
+    Uploads or downloads data to or from application using HTTP requests.
+    When run, the class will spin up a number of threads to read entities
+    from the data source, pass those to a number of worker threads
+    for sending to the application, and track all of the progress in a
+    small database in case an error or pause/termination requires a
+    restart/resumption of the upload process.
+
+    Args:
+      arg_dict: Dictionary of command line options.
+      input_generator_factory: A factory that creates a WorkItem generator.
+      throttle: A Throttle instance.
+      progress_db: The database to use for replaying/recording progress.
+      workerthread_factory: A factory for worker threads.
+      progresstrackerthread_factory: Used for dependency injection.
+      max_queue_size: Maximum size of the queues before they should block.
+      request_manager_factory: Used for dependency injection.
+      datasourcethread_factory: Used for dependency injection.
+      work_queue_factory: Used for dependency injection.
+      progress_queue_factory: Used for dependency injection.
+    """
+    self.app_id = arg_dict['app_id']
+    self.post_url = arg_dict['url']
+    self.kind = arg_dict['kind']
+    self.batch_size = arg_dict['batch_size']
+    self.input_generator_factory = input_generator_factory
+    self.num_threads = arg_dict['num_threads']
+    self.email = arg_dict['email']
+    self.passin = arg_dict['passin']
+    self.throttle = throttle
+    self.progress_db = progress_db
+    self.workerthread_factory = workerthread_factory
+    self.progresstrackerthread_factory = progresstrackerthread_factory
+    self.max_queue_size = max_queue_size
+    self.request_manager_factory = request_manager_factory
+    self.datasourcethread_factory = datasourcethread_factory
+    self.work_queue_factory = work_queue_factory
+    self.progress_queue_factory = progress_queue_factory
+    (scheme,
+     self.host_port, self.url_path,
+     unused_query, unused_fragment) = urlparse.urlsplit(self.post_url)
+    self.secure = (scheme == 'https')
+
+  def Run(self):
+    """Perform the work of the BulkTransporterApp.
+
+    Raises:
+      AuthenticationError: If authentication is required and fails.
+
+    Returns:
+      Error code suitable for sys.exit, e.g. 0 on success, 1 on failure.
+    """
+    thread_gate = ThreadGate(True)
+
+    self.throttle.Register(threading.currentThread())
+    threading.currentThread().exit_flag = False
+
+    work_queue = self.work_queue_factory(self.max_queue_size)
+
+    progress_queue = self.progress_queue_factory(self.max_queue_size)
+    request_manager = self.request_manager_factory(self.app_id,
+                                                   self.host_port,
+                                                   self.url_path,
+                                                   self.kind,
+                                                   self.throttle,
+                                                   self.batch_size,
+                                                   self.secure,
+                                                   self.email,
+                                                   self.passin)
+    try:
+      request_manager.Authenticate()
+    except Exception, e:
+      if not isinstance(e, urllib2.HTTPError) or (
+          e.code != 302 and e.code != 401):
+        logger.exception('Exception during authentication')
+      raise AuthenticationError()
+    if (request_manager.auth_called and
+        not request_manager.authenticated):
+      raise AuthenticationError('Authentication failed')
+
+    for unused_idx in xrange(self.num_threads):
+      thread = self.workerthread_factory(work_queue,
+                                         self.throttle,
+                                         thread_gate,
+                                         request_manager,
+                                         self.num_threads,
+                                         self.batch_size)
+      self.throttle.Register(thread)
+      thread_gate.Register(thread)
+
+    self.progress_thread = self.progresstrackerthread_factory(
+        progress_queue, self.progress_db)
+
+    if self.progress_db.UseProgressData():
+      logger.debug('Restarting upload using progress database')
+      progress_generator_factory = self.progress_db.GetProgressStatusGenerator
     else:
-      break
-
-  if thread_local.shut_down:
-    ShutdownThreads(data_source_thread, work_queue, thread_gate)
-
-  def _Join(ob, msg):
-    logging.debug('Waiting for %s...', msg)
-    if isinstance(ob, threading.Thread):
-      ob.join(timeout=3.0)
-      if ob.isAlive():
-        logging.debug('Joining %s failed', ob.GetFriendlyName())
+      progress_generator_factory = None
+
+    self.data_source_thread = (
+        self.datasourcethread_factory(work_queue,
+                                      progress_queue,
+                                      self.input_generator_factory,
+                                      progress_generator_factory))
+
+    thread_local = threading.local()
+    thread_local.shut_down = False
+
+    def Interrupt(unused_signum, unused_frame):
+      """Shutdown gracefully in response to a signal."""
+      thread_local.shut_down = True
+
+    signal.signal(signal.SIGINT, Interrupt)
+
+    self.progress_thread.start()
+    self.data_source_thread.start()
+    for thread in thread_gate.Threads():
+      thread.start()
+
+
+    while not thread_local.shut_down:
+      self.data_source_thread.join(timeout=0.25)
+
+      if self.data_source_thread.isAlive():
+        for thread in list(thread_gate.Threads()) + [self.progress_thread]:
+          if not thread.isAlive():
+            logger.info('Unexpected thread death: %s', thread.getName())
+            thread_local.shut_down = True
+            break
       else:
-        logging.debug('... done.')
-    elif isinstance(ob, (Queue.Queue, ReQueue)):
-      if not InterruptibleQueueJoin(ob, thread_local, thread_gate):
-        ShutdownThreads(data_source_thread, work_queue, thread_gate)
+        break
+
+    if thread_local.shut_down:
+      ShutdownThreads(self.data_source_thread, work_queue, thread_gate)
+
+    def _Join(ob, msg):
+      logger.debug('Waiting for %s...', msg)
+      if isinstance(ob, threading.Thread):
+        ob.join(timeout=3.0)
+        if ob.isAlive():
+          logger.debug('Joining %s failed', ob.GetFriendlyName())
+        else:
+          logger.debug('... done.')
+      elif isinstance(ob, (Queue.Queue, ReQueue)):
+        if not InterruptibleQueueJoin(ob, thread_local, thread_gate):
+          ShutdownThreads(self.data_source_thread, work_queue, thread_gate)
+      else:
+        ob.join()
+        logger.debug('... done.')
+
+    _Join(work_queue, 'work_queue to flush')
+
+    for unused_thread in thread_gate.Threads():
+      work_queue.put(_THREAD_SHOULD_EXIT)
+
+    for unused_thread in thread_gate.Threads():
+      thread_gate.EnableThread()
+
+    for thread in thread_gate.Threads():
+      _Join(thread, 'thread [%s] to terminate' % thread.getName())
+
+      thread.CheckError()
+
+    if self.progress_thread.isAlive():
+      _Join(progress_queue, 'progress_queue to finish')
     else:
-      ob.join()
-      logging.debug('... done.')
-
-  _Join(work_queue, 'work_queue to flush')
-
-  for unused_thread in thread_gate.Threads():
-    work_queue.put(_THREAD_SHOULD_EXIT)
-
-  for unused_thread in thread_gate.Threads():
-    thread_gate.EnableThread()
-
-  for thread in thread_gate.Threads():
-    _Join(thread, 'thread [%s] to terminate' % thread.getName())
-
-    thread.CheckError()
-
-  if progress_thread.isAlive():
-    _Join(progress_queue, 'progress_queue to finish')
-  else:
-    logging.warn('Progress thread exited prematurely')
-
-  progress_queue.put(_THREAD_SHOULD_EXIT)
-  _Join(progress_thread, 'progress_thread to terminate')
-  progress_thread.CheckError()
-
-  data_source_thread.CheckError()
-
-  total_up, duration = throttle.TotalTransferred(BANDWIDTH_UP)
-  s_total_up, unused_duration = throttle.TotalTransferred(HTTPS_BANDWIDTH_UP)
-  total_up += s_total_up
-  logging.info('%d entites read, %d previously transferred',
-               data_source_thread.read_count,
-               data_source_thread.sent_count)
-  logging.info('%d entities (%d bytes) transferred in %.1f seconds',
-               progress_thread.entities_sent, total_up, duration)
-  if (data_source_thread.read_all and
-      progress_thread.entities_sent + data_source_thread.sent_count >=
-      data_source_thread.read_count):
-    logging.info('All entities successfully uploaded')
-  else:
-    logging.info('Some entities not successfully uploaded')
+      logger.warn('Progress thread exited prematurely')
+
+    progress_queue.put(_THREAD_SHOULD_EXIT)
+    _Join(self.progress_thread, 'progress_thread to terminate')
+    self.progress_thread.CheckError()
+    if not thread_local.shut_down:
+      self.progress_thread.WorkFinished()
+
+    self.data_source_thread.CheckError()
+
+    return self.ReportStatus()
+
+  def ReportStatus(self):
+    """Display a message reporting the final status of the transfer."""
+    raise NotImplementedError()
+
+
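Pulling the pieces together, a rough sketch of how an upload run might be wired up with this class; all names on the right are placeholders for objects built elsewhere (the real driver, presumably _PerformBulkload below, constructs the same objects from the parsed command-line options):

    app = BulkUploaderApp(arg_dict,                    # from ParseArguments
                          workitem_generator_factory,  # e.g. a CSV WorkItem generator
                          throttle,                    # a Throttle instance
                          progress_db,                 # a ProgressDatabase
                          BulkLoaderThread,            # worker thread factory
                          ProgressTrackerThread)       # progress thread factory
    exit_code = app.Run()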
+class BulkUploaderApp(BulkTransporterApp):
+  """Class to encapsulate bulk uploader functionality."""
+
+  def __init__(self, *args, **kwargs):
+    BulkTransporterApp.__init__(self, *args, **kwargs)
+
+  def ReportStatus(self):
+    """Display a message reporting the final status of the transfer."""
+    total_up, duration = self.throttle.TotalTransferred(BANDWIDTH_UP)
+    s_total_up, unused_duration = self.throttle.TotalTransferred(
+        HTTPS_BANDWIDTH_UP)
+    total_up += s_total_up
+    total = total_up
+    logger.info('%d entities total, %d previously transferred',
+                self.data_source_thread.read_count,
+                self.data_source_thread.xfer_count)
+    transfer_count = self.progress_thread.EntitiesTransferred()
+    logger.info('%d entities (%d bytes) transferred in %.1f seconds',
+                transfer_count, total, duration)
+    if (self.data_source_thread.read_all and
+        transfer_count +
+        self.data_source_thread.xfer_count >=
+        self.data_source_thread.read_count):
+      logger.info('All entities successfully transferred')
+      return 0
+    else:
+      logger.info('Some entities not successfully transferred')
+      return 1
+
+
+class BulkDownloaderApp(BulkTransporterApp):
+  """Class to encapsulate bulk downloader functionality."""
+
+  def __init__(self, *args, **kwargs):
+    BulkTransporterApp.__init__(self, *args, **kwargs)
+
+  def ReportStatus(self):
+    """Display a message reporting the final status of the transfer."""
+    total_down, duration = self.throttle.TotalTransferred(BANDWIDTH_DOWN)
+    s_total_down, unused_duration = self.throttle.TotalTransferred(
+        HTTPS_BANDWIDTH_DOWN)
+    total_down += s_total_down
+    total = total_down
+    existing_count = self.progress_thread.existing_count
+    xfer_count = self.progress_thread.EntitiesTransferred()
+    logger.info('Have %d entities, %d previously transferred',
+                xfer_count + existing_count, existing_count)
+    logger.info('%d entities (%d bytes) transferred in %.1f seconds',
+                xfer_count, total, duration)
+    return 0
 
 
 def PrintUsageExit(code):
@@ -2266,6 +3807,35 @@
   sys.exit(code)
 
 
+REQUIRED_OPTION = object()
+
+
+FLAG_SPEC = ['debug',
+             'help',
+             'url=',
+             'filename=',
+             'batch_size=',
+             'kind=',
+             'num_threads=',
+             'bandwidth_limit=',
+             'rps_limit=',
+             'http_limit=',
+             'db_filename=',
+             'app_id=',
+             'config_file=',
+             'has_header',
+             'csv_has_header',
+             'auth_domain=',
+             'result_db_filename=',
+             'download',
+             'loader_opts=',
+             'exporter_opts=',
+             'log_file=',
+             'email=',
+             'passin',
+             ]
+
+
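To make the flag list concrete, a hypothetical argv of the shape ParseArguments (below) accepts; every value here is invented, and only the flag names come from FLAG_SPEC:

    argv = ['bulkloader.py',
            '--url=http://myapp.appspot.com/remote_api',
            '--kind=Greeting',
            '--filename=greetings.csv',
            '--config_file=greeting_config.py',
            '--num_threads=5',
            '--has_header']
    arg_dict = ParseArguments(argv)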
 def ParseArguments(argv):
   """Parses command-line arguments.
 
@@ -2275,87 +3845,98 @@
     argv: List of command-line arguments.
 
   Returns:
-    Tuple (url, filename, cookie, batch_size, kind) containing the values from
-    each corresponding command-line flag.
+    A dictionary containing the value of command-line options.
   """
   opts, unused_args = getopt.getopt(
       argv[1:],
       'h',
-      ['debug',
-       'help',
-       'url=',
-       'filename=',
-       'batch_size=',
-       'kind=',
-       'num_threads=',
-       'bandwidth_limit=',
-       'rps_limit=',
-       'http_limit=',
-       'db_filename=',
-       'app_id=',
-       'config_file=',
-       'auth_domain=',
-      ])
-
-  url = None
-  filename = None
-  batch_size = DEFAULT_BATCH_SIZE
-  kind = None
-  num_threads = DEFAULT_THREAD_COUNT
-  bandwidth_limit = DEFAULT_BANDWIDTH_LIMIT
-  rps_limit = DEFAULT_RPS_LIMIT
-  http_limit = DEFAULT_REQUEST_LIMIT
-  db_filename = None
-  app_id = None
-  config_file = None
-  auth_domain = 'gmail.com'
+      FLAG_SPEC)
+
+  arg_dict = {}
+
+  arg_dict['url'] = REQUIRED_OPTION
+  arg_dict['filename'] = REQUIRED_OPTION
+  arg_dict['config_file'] = REQUIRED_OPTION
+  arg_dict['kind'] = REQUIRED_OPTION
+
+  arg_dict['batch_size'] = DEFAULT_BATCH_SIZE
+  arg_dict['num_threads'] = DEFAULT_THREAD_COUNT
+  arg_dict['bandwidth_limit'] = DEFAULT_BANDWIDTH_LIMIT
+  arg_dict['rps_limit'] = DEFAULT_RPS_LIMIT
+  arg_dict['http_limit'] = DEFAULT_REQUEST_LIMIT
+
+  arg_dict['db_filename'] = None
+  arg_dict['app_id'] = ''
+  arg_dict['auth_domain'] = 'gmail.com'
+  arg_dict['has_header'] = False
+  arg_dict['result_db_filename'] = None
+  arg_dict['download'] = False
+  arg_dict['loader_opts'] = None
+  arg_dict['exporter_opts'] = None
+  arg_dict['debug'] = False
+  arg_dict['log_file'] = None
+  arg_dict['email'] = None
+  arg_dict['passin'] = False
+
+  def ExpandFilename(filename):
+    """Expand shell variables and ~usernames in filename."""
+    return os.path.expandvars(os.path.expanduser(filename))
 
   for option, value in opts:
     if option == '--debug':
-      logging.getLogger().setLevel(logging.DEBUG)
+      arg_dict['debug'] = True
     elif option in ('-h', '--help'):
       PrintUsageExit(0)
     elif option == '--url':
-      url = value
+      arg_dict['url'] = value
     elif option == '--filename':
-      filename = value
+      arg_dict['filename'] = ExpandFilename(value)
     elif option == '--batch_size':
-      batch_size = int(value)
+      arg_dict['batch_size'] = int(value)
     elif option == '--kind':
-      kind = value
+      arg_dict['kind'] = value
     elif option == '--num_threads':
-      num_threads = int(value)
+      arg_dict['num_threads'] = int(value)
     elif option == '--bandwidth_limit':
-      bandwidth_limit = int(value)
+      arg_dict['bandwidth_limit'] = int(value)
     elif option == '--rps_limit':
-      rps_limit = int(value)
+      arg_dict['rps_limit'] = int(value)
     elif option == '--http_limit':
-      http_limit = int(value)
+      arg_dict['http_limit'] = int(value)
     elif option == '--db_filename':
-      db_filename = value
+      arg_dict['db_filename'] = ExpandFilename(value)
     elif option == '--app_id':
-      app_id = value
+      arg_dict['app_id'] = value
     elif option == '--config_file':
-      config_file = value
+      arg_dict['config_file'] = ExpandFilename(value)
     elif option == '--auth_domain':
-      auth_domain = value
-
-  return ProcessArguments(app_id=app_id,
-                          url=url,
-                          filename=filename,
-                          batch_size=batch_size,
-                          kind=kind,
-                          num_threads=num_threads,
-                          bandwidth_limit=bandwidth_limit,
-                          rps_limit=rps_limit,
-                          http_limit=http_limit,
-                          db_filename=db_filename,
-                          config_file=config_file,
-                          auth_domain=auth_domain,
-                          die_fn=lambda: PrintUsageExit(1))
+      arg_dict['auth_domain'] = value
+    elif option == '--has_header':
+      arg_dict['has_header'] = True
+    elif option == '--csv_has_header':
+      print >>sys.stderr, ('--csv_has_header is deprecated, please use '
+                           '--has_header.')
+      arg_dict['has_header'] = True
+    elif option == '--result_db_filename':
+      arg_dict['result_db_filename'] = ExpandFilename(value)
+    elif option == '--download':
+      arg_dict['download'] = True
+    elif option == '--loader_opts':
+      arg_dict['loader_opts'] = value
+    elif option == '--exporter_opts':
+      arg_dict['exporter_opts'] = value
+    elif option == '--log_file':
+      arg_dict['log_file'] = value
+    elif option == '--email':
+      arg_dict['email'] = value
+    elif option == '--passin':
+      arg_dict['passin'] = True
+
+  return ProcessArguments(arg_dict, die_fn=lambda: PrintUsageExit(1))
 
 
 def ThrottleLayout(bandwidth_limit, http_limit, rps_limit):
+  """Return a dictionary indicating the throttle options."""
   return {
       BANDWIDTH_UP: bandwidth_limit,
       BANDWIDTH_DOWN: bandwidth_limit,
@@ -2367,221 +3948,414 @@
   }
 
 
-def LoadConfig(config_file):
-  """Loads a config file and registers any Loader classes present."""
-  if config_file:
-    global_dict = dict(globals())
-    execfile(config_file, global_dict)
-    for cls in Loader.__subclasses__():
-      Loader.RegisterLoader(cls())
-
-
-def _MissingArgument(arg_name, die_fn):
-  """Print error message about missing argument and die."""
-  print >>sys.stderr, '%s argument required' % arg_name
-  die_fn()
-
-
-def ProcessArguments(app_id=None,
-                     url=None,
-                     filename=None,
-                     batch_size=DEFAULT_BATCH_SIZE,
-                     kind=None,
-                     num_threads=DEFAULT_THREAD_COUNT,
-                     bandwidth_limit=DEFAULT_BANDWIDTH_LIMIT,
-                     rps_limit=DEFAULT_RPS_LIMIT,
-                     http_limit=DEFAULT_REQUEST_LIMIT,
-                     db_filename=None,
-                     config_file=None,
-                     auth_domain='gmail.com',
+def CheckOutputFile(filename):
+  """Check that the given file does not exist and can be opened for writing.
+
+  Args:
+    filename: The name of the file.
+
+  Raises:
+    FileExistsError: if the given filename already exists.
+    FileNotWritableError: if the given filename's directory is not writable.
+  """
+  if os.path.exists(filename):
+    raise FileExistsError('%s: output file exists' % filename)
+  elif not os.access(os.path.dirname(filename), os.W_OK):
+    raise FileNotWritableError(
+        '%s: not writable' % os.path.dirname(filename))
+
+
+def LoadConfig(config_file_name, exit_fn=sys.exit):
+  """Loads a config file and registers any Loader classes present.
+
+  Args:
+    config_file_name: The name of the configuration file.
+    exit_fn: Used for dependency injection.
+  """
+  if config_file_name:
+    config_file = open(config_file_name, 'r')
+    try:
+      bulkloader_config = imp.load_module(
+          'bulkloader_config', config_file, config_file_name,
+          ('', 'r', imp.PY_SOURCE))
+      sys.modules['bulkloader_config'] = bulkloader_config
+
+      if hasattr(bulkloader_config, 'loaders'):
+        for cls in bulkloader_config.loaders:
+          Loader.RegisterLoader(cls())
+
+      if hasattr(bulkloader_config, 'exporters'):
+        for cls in bulkloader_config.exporters:
+          Exporter.RegisterExporter(cls())
+    except NameError, e:
+      m = re.search(r"[^']*'([^']*)'.*", str(e))
+      if m.groups() and m.group(1) == 'Loader':
+        print >>sys.stderr, """
+The config file format has changed and you appear to be using an old-style
+config file.  Please make the following changes:
+
+1. At the top of the file, add this:
+
+from google.appengine.tools.bulkloader import Loader
+
+2. For each of your Loader subclasses add the following at the end of the
+   __init__ definition:
+
+self.alias_old_names()
+
+3. At the bottom of the file, add this:
+
+loaders = [MyLoader1,...,MyLoaderN]
+
+Where MyLoader1,...,MyLoaderN are the Loader subclasses you want the bulkloader
+to have access to.
+"""
+        exit_fn(1)
+      else:
+        raise
+    except Exception, e:
+      if isinstance(e, NameClashError) or 'bulkloader_config' in vars() and (
+          hasattr(bulkloader_config, 'bulkloader') and
+          isinstance(e, bulkloader_config.bulkloader.NameClashError)):
+        print >> sys.stderr, (
+            'Found both %s and %s while aliasing old names on %s.'%
+            (e.old_name, e.new_name, e.klass))
+        exit_fn(1)
+      else:
+        raise
+
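For reference, a minimal new-style config module of the shape LoadConfig expects; the class, kind, and property names are invented, and a real module would also import the application's models so the kind resolves:

    from google.appengine.tools.bulkloader import Loader

    class GreetingLoader(Loader):
      def __init__(self):
        Loader.__init__(self, 'Greeting', [('content', str)])
        self.alias_old_names()  # only needed for Loaders written to the old API

    loaders = [GreetingLoader]
    exporters = []              # list Exporter subclasses here for --download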
+def GetArgument(kwargs, name, die_fn):
+  """Get the value of the key name in kwargs, or die with die_fn.
+
+  Args:
+    kwargs: A dictionary containing the options for the bulkloader.
+    name: The name of a bulkloader option.
+    die_fn: The function to call to exit the program.
+
+  Returns:
+    The value of kwargs[name], if name is present in kwargs.
+  """
+  if name in kwargs:
+    return kwargs[name]
+  else:
+    print >>sys.stderr, '%s argument required' % name
+    die_fn()
+
+
+def _MakeSignature(app_id=None,
+                   url=None,
+                   kind=None,
+                   db_filename=None,
+                   download=None,
+                   has_header=None,
+                   result_db_filename=None):
+  """Returns a string that identifies the important options for the database."""
+  if download:
+    result_db_line = 'result_db: %s' % result_db_filename
+  else:
+    result_db_line = ''
+  return u"""
+  app_id: %s
+  url: %s
+  kind: %s
+  download: %s
+  progress_db: %s
+  has_header: %s
+  %s
+  """ % (app_id, url, kind, download, db_filename, has_header, result_db_line)
+
+
+def ProcessArguments(arg_dict,
                      die_fn=lambda: sys.exit(1)):
-  """Processes non command-line input arguments."""
+  """Processes non command-line input arguments.
+
+  Args:
+    arg_dict: Dictionary containing the values of bulkloader options.
+    die_fn: Function to call in case of an error during argument processing.
+
+  Returns:
+    A dictionary of bulkloader options.
+  """
+  app_id = GetArgument(arg_dict, 'app_id', die_fn)
+  url = GetArgument(arg_dict, 'url', die_fn)
+  filename = GetArgument(arg_dict, 'filename', die_fn)
+  batch_size = GetArgument(arg_dict, 'batch_size', die_fn)
+  kind = GetArgument(arg_dict, 'kind', die_fn)
+  db_filename = GetArgument(arg_dict, 'db_filename', die_fn)
+  config_file = GetArgument(arg_dict, 'config_file', die_fn)
+  result_db_filename = GetArgument(arg_dict, 'result_db_filename', die_fn)
+  download = GetArgument(arg_dict, 'download', die_fn)
+  log_file = GetArgument(arg_dict, 'log_file', die_fn)
+
+  unused_passin = GetArgument(arg_dict, 'passin', die_fn)
+  unused_email = GetArgument(arg_dict, 'email', die_fn)
+  unused_debug = GetArgument(arg_dict, 'debug', die_fn)
+  unused_num_threads = GetArgument(arg_dict, 'num_threads', die_fn)
+  unused_bandwidth_limit = GetArgument(arg_dict, 'bandwidth_limit', die_fn)
+  unused_rps_limit = GetArgument(arg_dict, 'rps_limit', die_fn)
+  unused_http_limit = GetArgument(arg_dict, 'http_limit', die_fn)
+  unused_auth_domain = GetArgument(arg_dict, 'auth_domain', die_fn)
+  unused_has_headers = GetArgument(arg_dict, 'has_header', die_fn)
+  unused_loader_opts = GetArgument(arg_dict, 'loader_opts', die_fn)
+  unused_exporter_opts = GetArgument(arg_dict, 'exporter_opts', die_fn)
+
+  errors = []
+
   if db_filename is None:
-    db_filename = time.strftime('bulkloader-progress-%Y%m%d.%H%M%S.sql3')
+    arg_dict['db_filename'] = time.strftime(
+        'bulkloader-progress-%Y%m%d.%H%M%S.sql3')
+
+  if result_db_filename is None:
+    arg_dict['result_db_filename'] = time.strftime(
+        'bulkloader-results-%Y%m%d.%H%M%S.sql3')
+
+  if log_file is None:
+    arg_dict['log_file'] = time.strftime('bulkloader-log-%Y%m%d.%H%M%S')
 
   if batch_size <= 0:
-    print >>sys.stderr, 'batch_size must be 1 or larger'
-    die_fn()
-
-  if url is None:
-    _MissingArgument('url', die_fn)
-
-  if filename is None:
-    _MissingArgument('filename', die_fn)
-
-  if kind is None:
-    _MissingArgument('kind', die_fn)
-
-  if config_file is None:
-    _MissingArgument('config_file', die_fn)
-
-  if app_id is None:
+    errors.append('batch_size must be at least 1')
+
+  required = '%s argument required'
+
+  if url is REQUIRED_OPTION:
+    errors.append(required % 'url')
+
+  if filename is REQUIRED_OPTION:
+    errors.append(required % 'filename')
+
+  if kind is REQUIRED_OPTION:
+    errors.append(required % 'kind')
+
+  if config_file is REQUIRED_OPTION:
+    errors.append(required % 'config_file')
+
+  if download:
+    if result_db_filename is REQUIRED_OPTION:
+      errors.append(required % 'result_db_filename')
+
+  if not app_id:
     (unused_scheme, host_port, unused_url_path,
      unused_query, unused_fragment) = urlparse.urlsplit(url)
     suffix_idx = host_port.find('.appspot.com')
     if suffix_idx > -1:
-      app_id = host_port[:suffix_idx]
+      arg_dict['app_id'] = host_port[:suffix_idx]
     elif host_port.split(':')[0].endswith('google.com'):
-      app_id = host_port.split('.')[0]
+      arg_dict['app_id'] = host_port.split('.')[0]
     else:
-      print >>sys.stderr, 'app_id required for non appspot.com domains'
-      die_fn()
-
-  return (app_id, url, filename, batch_size, kind, num_threads,
-          bandwidth_limit, rps_limit, http_limit, db_filename, config_file,
-          auth_domain)
-
-
-def _PerformBulkload(app_id=None,
-                     url=None,
-                     filename=None,
-                     batch_size=DEFAULT_BATCH_SIZE,
-                     kind=None,
-                     num_threads=DEFAULT_THREAD_COUNT,
-                     bandwidth_limit=DEFAULT_BANDWIDTH_LIMIT,
-                     rps_limit=DEFAULT_RPS_LIMIT,
-                     http_limit=DEFAULT_REQUEST_LIMIT,
-                     db_filename=None,
-                     config_file=None,
-                     auth_domain='gmail.com'):
-  """Runs the bulkloader, given the options as keyword arguments.
+      errors.append('app_id argument required for non appspot.com domains')
+
+  if errors:
+    print >>sys.stderr, '\n'.join(errors)
+    die_fn()
+
+  return arg_dict
+
+
+def ParseKind(kind):
+  if kind and kind[0] == '(' and kind[-1] == ')':
+    return tuple(kind[1:-1].split(','))
+  else:
+    return kind
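
A quick illustration of ParseKind (the kind names are hypothetical): a
parenthesized, comma-separated list becomes a tuple of kinds, while a plain
kind string passes through unchanged. Note that the split does not strip
whitespace, so a space after a comma becomes part of the kind name.

ParseKind('Greeting')               # -> 'Greeting'
ParseKind('(Greeting,Guestbook)')   # -> ('Greeting', 'Guestbook')
ParseKind('(Greeting, Guestbook)')  # -> ('Greeting', ' Guestbook')
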
+
+
+def _PerformBulkload(arg_dict,
+                     check_file=CheckFile,
+                     check_output_file=CheckOutputFile):
+  """Runs the bulkloader, given the command line options.
 
   Args:
-    app_id: The application id.
-    url: The url of the remote_api endpoint.
-    filename: The name of the file containing the CSV data.
-    batch_size: The number of records to send per request.
-    kind: The kind of entity to transfer.
-    num_threads: The number of threads to use to transfer data.
-    bandwidth_limit: Maximum bytes/second to transfers.
-    rps_limit: Maximum records/second to transfer.
-    http_limit: Maximum requests/second for transfers.
-    db_filename: The name of the SQLite3 progress database file.
-    config_file: The name of the configuration file.
-    auth_domain: The auth domain to use for logins and UserProperty.
+    arg_dict: Dictionary of bulkloader options.
+    check_file: Used for dependency injection.
+    check_output_file: Used for dependency injection.
 
   Returns:
     An exit code.
+
+  Raises:
+    ConfigurationError: if inconsistent options are passed.
   """
+  app_id = arg_dict['app_id']
+  url = arg_dict['url']
+  filename = arg_dict['filename']
+  batch_size = arg_dict['batch_size']
+  kind = arg_dict['kind']
+  num_threads = arg_dict['num_threads']
+  bandwidth_limit = arg_dict['bandwidth_limit']
+  rps_limit = arg_dict['rps_limit']
+  http_limit = arg_dict['http_limit']
+  db_filename = arg_dict['db_filename']
+  config_file = arg_dict['config_file']
+  auth_domain = arg_dict['auth_domain']
+  has_header = arg_dict['has_header']
+  download = arg_dict['download']
+  result_db_filename = arg_dict['result_db_filename']
+  loader_opts = arg_dict['loader_opts']
+  exporter_opts = arg_dict['exporter_opts']
+  email = arg_dict['email']
+  passin = arg_dict['passin']
+
   os.environ['AUTH_DOMAIN'] = auth_domain
+
+  kind = ParseKind(kind)
+
+  check_file(config_file)
+  if not download:
+    check_file(filename)
+  else:
+    check_output_file(filename)
+
   LoadConfig(config_file)
 
+  os.environ['APPLICATION_ID'] = app_id
+
   throttle_layout = ThrottleLayout(bandwidth_limit, http_limit, rps_limit)
 
   throttle = Throttle(layout=throttle_layout)
-
-
-  workitem_generator_factory = GetCSVGeneratorFactory(filename, batch_size)
+  signature = _MakeSignature(app_id=app_id,
+                             url=url,
+                             kind=kind,
+                             db_filename=db_filename,
+                             download=download,
+                             has_header=has_header,
+                             result_db_filename=result_db_filename)
+
+
+  max_queue_size = max(DEFAULT_QUEUE_SIZE, 3 * num_threads + 5)
 
   if db_filename == 'skip':
     progress_db = StubProgressDatabase()
+  elif not download:
+    progress_db = ProgressDatabase(db_filename, signature)
   else:
-    progress_db = ProgressDatabase(db_filename)
-
-
-  max_queue_size = max(DEFAULT_QUEUE_SIZE, 2 * num_threads + 5)
-
-  PerformBulkUpload(app_id,
-                    url,
-                    kind,
-                    workitem_generator_factory,
-                    num_threads,
-                    throttle,
-                    progress_db,
-                    max_queue_size=max_queue_size)
-
-  return 0
-
-
-def Run(app_id=None,
-        url=None,
-        filename=None,
-        batch_size=DEFAULT_BATCH_SIZE,
-        kind=None,
-        num_threads=DEFAULT_THREAD_COUNT,
-        bandwidth_limit=DEFAULT_BANDWIDTH_LIMIT,
-        rps_limit=DEFAULT_RPS_LIMIT,
-        http_limit=DEFAULT_REQUEST_LIMIT,
-        db_filename=None,
-        auth_domain='gmail.com',
-        config_file=None):
+    progress_db = ExportProgressDatabase(db_filename, signature)
+
+  if download:
+    result_db = ResultDatabase(result_db_filename, signature)
+
+  return_code = 1
+
+  if not download:
+    loader = Loader.RegisteredLoader(kind)
+    try:
+      loader.initialize(filename, loader_opts)
+      workitem_generator_factory = GetCSVGeneratorFactory(
+          kind, filename, batch_size, has_header)
+
+      app = BulkUploaderApp(arg_dict,
+                            workitem_generator_factory,
+                            throttle,
+                            progress_db,
+                            BulkLoaderThread,
+                            ProgressTrackerThread,
+                            max_queue_size,
+                            RequestManager,
+                            DataSourceThread,
+                            ReQueue,
+                            Queue.Queue)
+      try:
+        return_code = app.Run()
+      except AuthenticationError:
+        logger.info('Authentication Failed')
+    finally:
+      loader.finalize()
+  else:
+    exporter = Exporter.RegisteredExporter(kind)
+    try:
+      exporter.initialize(filename, exporter_opts)
+
+      def KeyRangeGeneratorFactory(progress_queue, progress_gen):
+        return KeyRangeGenerator(kind, progress_queue, progress_gen)
+
+      def ExportProgressThreadFactory(progress_queue, progress_db):
+        return ExportProgressThread(kind,
+                                    progress_queue,
+                                    progress_db,
+                                    result_db)
+      app = BulkDownloaderApp(arg_dict,
+                              KeyRangeGeneratorFactory,
+                              throttle,
+                              progress_db,
+                              BulkExporterThread,
+                              ExportProgressThreadFactory,
+                              0,
+                              RequestManager,
+                              DataSourceThread,
+                              ReQueue,
+                              Queue.Queue)
+      try:
+        return_code = app.Run()
+      except AuthenticationError:
+        logger.info('Authentication Failed')
+    finally:
+      exporter.finalize()
+  return return_code
+
+
+def SetupLogging(arg_dict):
+  """Sets up logging for the bulkloader.
+
+  Args:
+    arg_dict: Dictionary mapping flag names to their arguments.
+  """
+  format = '[%(levelname)-8s %(asctime)s %(filename)s] %(message)s'
+  debug = arg_dict['debug']
+  log_file = arg_dict['log_file']
+
+  logger.setLevel(logging.DEBUG)
+
+  logger.propagate = False
+
+  file_handler = logging.FileHandler(log_file, 'w')
+  file_handler.setLevel(logging.DEBUG)
+  file_formatter = logging.Formatter(format)
+  file_handler.setFormatter(file_formatter)
+  logger.addHandler(file_handler)
+
+  console = logging.StreamHandler()
+  level = logging.INFO
+  if debug:
+    level = logging.DEBUG
+  console.setLevel(level)
+  console_format = '[%(levelname)-8s] %(message)s'
+  formatter = logging.Formatter(console_format)
+  console.setFormatter(formatter)
+  logger.addHandler(console)
+
+  logger.info('Logging to %s', log_file)
+
+  appengine_rpc.logger.setLevel(logging.WARN)
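
A minimal sketch of driving SetupLogging directly (the dictionary values are
hypothetical): the file handler always records at DEBUG, while the console
handler stays at INFO unless the debug flag is set.

SetupLogging({'debug': False,
              'log_file': 'bulkloader-log-20090424.120000'})
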
+
+
+def Run(arg_dict):
   """Sets up and runs the bulkloader, given the options as keyword arguments.
 
   Args:
-    app_id: The application id.
-    url: The url of the remote_api endpoint.
-    filename: The name of the file containing the CSV data.
-    batch_size: The number of records to send per request.
-    kind: The kind of entity to transfer.
-    num_threads: The number of threads to use to transfer data.
-    bandwidth_limit: Maximum bytes/second to transfers.
-    rps_limit: Maximum records/second to transfer.
-    http_limit: Maximum requests/second for transfers.
-    db_filename: The name of the SQLite3 progress database file.
-    config_file: The name of the configuration file.
-    auth_domain: The auth domain to use for logins and UserProperty.
+    arg_dict: Dictionary of bulkloader options.
 
   Returns:
     An exit code.
   """
-  logging.basicConfig(
-      format='%(levelname)-8s %(asctime)s %(filename)s] %(message)s')
-  args = ProcessArguments(app_id=app_id,
-                          url=url,
-                          filename=filename,
-                          batch_size=batch_size,
-                          kind=kind,
-                          num_threads=num_threads,
-                          bandwidth_limit=bandwidth_limit,
-                          rps_limit=rps_limit,
-                          http_limit=http_limit,
-                          db_filename=db_filename,
-                          config_file=config_file)
-
-  (app_id, url, filename, batch_size, kind, num_threads, bandwidth_limit,
-   rps_limit, http_limit, db_filename, config_file, auth_domain) = args
-
-  return _PerformBulkload(app_id=app_id,
-                          url=url,
-                          filename=filename,
-                          batch_size=batch_size,
-                          kind=kind,
-                          num_threads=num_threads,
-                          bandwidth_limit=bandwidth_limit,
-                          rps_limit=rps_limit,
-                          http_limit=http_limit,
-                          db_filename=db_filename,
-                          config_file=config_file,
-                          auth_domain=auth_domain)
+  arg_dict = ProcessArguments(arg_dict)
+
+  SetupLogging(arg_dict)
+
+  return _PerformBulkload(arg_dict)
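
A hedged sketch of invoking Run programmatically. Every key read by
ProcessArguments and _PerformBulkload must be present; the application id,
URL, file names, kind and numeric limits below are purely illustrative.

from google.appengine.tools import bulkloader

options = {
    'app_id': 'myapp',
    'url': 'http://myapp.appspot.com/remote_api',
    'filename': 'greetings.csv',
    'kind': 'Greeting',
    'config_file': 'loader_config.py',
    'batch_size': 10,
    'num_threads': 10,
    'bandwidth_limit': 250000,
    'rps_limit': 20,
    'http_limit': 8,
    'db_filename': None,          # None lets ProcessArguments pick a
    'result_db_filename': None,   # timestamped default for these three
    'log_file': None,
    'download': False,
    'has_header': True,
    'auth_domain': 'gmail.com',
    'loader_opts': '',
    'exporter_opts': '',
    'email': None,
    'passin': False,
    'debug': False,
}
exit_code = bulkloader.Run(options)
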
 
 
 def main(argv):
   """Runs the importer from the command line."""
-  logging.basicConfig(
-      level=logging.INFO,
-      format='%(levelname)-8s %(asctime)s %(filename)s] %(message)s')
-
-  args = ParseArguments(argv)
-  if None in args:
-    print >>sys.stderr, 'Invalid arguments'
+
+  arg_dict = ParseArguments(argv)
+
+  errors = ['%s argument required' % key
+            for (key, value) in arg_dict.iteritems()
+            if value is REQUIRED_OPTION]
+  if errors:
+    print >>sys.stderr, '\n'.join(errors)
     PrintUsageExit(1)
 
-  (app_id, url, filename, batch_size, kind, num_threads,
-   bandwidth_limit, rps_limit, http_limit, db_filename, config_file,
-   auth_domain) = args
-
-  return _PerformBulkload(app_id=app_id,
-                          url=url,
-                          filename=filename,
-                          batch_size=batch_size,
-                          kind=kind,
-                          num_threads=num_threads,
-                          bandwidth_limit=bandwidth_limit,
-                          rps_limit=rps_limit,
-                          http_limit=http_limit,
-                          db_filename=db_filename,
-                          config_file=config_file,
-                          auth_domain=auth_domain)
+  SetupLogging(arg_dict)
+  return _PerformBulkload(arg_dict)
 
 
 if __name__ == '__main__':
--- a/thirdparty/google_appengine/google/appengine/tools/dev_appserver.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/dev_appserver.py	Fri Apr 24 14:16:00 2009 +0000
@@ -39,6 +39,12 @@
 import cStringIO
 import cgi
 import cgitb
+
+try:
+  import distutils.util
+except ImportError:
+  pass
+
 import dummy_thread
 import email.Utils
 import errno
@@ -258,17 +264,17 @@
         access the URL; False if anyone can access the URL.
     """
     if not isinstance(dispatcher, URLDispatcher):
-      raise TypeError, 'dispatcher must be a URLDispatcher sub-class'
+      raise TypeError('dispatcher must be a URLDispatcher sub-class')
 
     if regex.startswith('^') or regex.endswith('$'):
-      raise InvalidAppConfigError, 'regex starts with "^" or ends with "$"'
+      raise InvalidAppConfigError('regex starts with "^" or ends with "$"')
 
     adjusted_regex = '^%s$' % regex
 
     try:
       url_re = re.compile(adjusted_regex)
     except re.error, e:
-      raise InvalidAppConfigError, 'regex invalid: %s' % e
+      raise InvalidAppConfigError('regex invalid: %s' % e)
 
     match_tuple = (url_re, dispatcher, path, requires_login, admin_only)
     self._url_patterns.append(match_tuple)
@@ -348,7 +354,7 @@
     path variable supplied to this method is ignored.
     """
     cookies = ', '.join(headers.getheaders('cookie'))
-    email, admin = self._get_user_info(cookies)
+    email, admin, user_id = self._get_user_info(cookies)
 
     for matcher in self._url_matchers:
       dispatcher, matched_path, requires_login, admin_only = matcher.Match(relative_url)
@@ -540,8 +546,9 @@
   env['CONTENT_LENGTH'] = headers.getheader('content-length', '')
 
   cookies = ', '.join(headers.getheaders('cookie'))
-  email, admin = get_user_info(cookies)
+  email, admin, user_id = get_user_info(cookies)
   env['USER_EMAIL'] = email
+  env['USER_ID'] = user_id
   if admin:
     env['USER_IS_ADMIN'] = '1'
 
@@ -615,14 +622,14 @@
 def FakeUnlink(path):
   """Fake version of os.unlink."""
   if os.path.isdir(path):
-    raise OSError(2, "Is a directory", path)
+    raise OSError(errno.ENOENT, "Is a directory", path)
   else:
-    raise OSError(1, "Operation not permitted", path)
+    raise OSError(errno.EPERM, "Operation not permitted", path)
 
 
 def FakeReadlink(path):
   """Fake version of os.readlink."""
-  raise OSError(22, "Invalid argument", path)
+  raise OSError(errno.EINVAL, "Invalid argument", path)
 
 
 def FakeAccess(path, mode):
@@ -636,10 +643,33 @@
 def FakeSetLocale(category, value=None, original_setlocale=locale.setlocale):
   """Fake version of locale.setlocale that only supports the default."""
   if value not in (None, '', 'C', 'POSIX'):
-    raise locale.Error, 'locale emulation only supports "C" locale'
+    raise locale.Error('locale emulation only supports "C" locale')
   return original_setlocale(category, 'C')
 
 
+def FakeOpen(file, flags, mode=0777):
+  """Fake version of os.open."""
+  raise OSError(errno.EPERM, "Operation not permitted", file)
+
+
+def FakeRename(src, dst):
+  """Fake version of os.rename."""
+  raise OSError(errno.EPERM, "Operation not permitted", src)
+
+
+def FakeUTime(path, times):
+  """Fake version of os.utime."""
+  raise OSError(errno.EPERM, "Operation not permitted", path)
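
Within the sandbox these stubs replace the corresponding os functions, so the
calls fail uniformly regardless of actual filesystem permissions. A small
sketch with invented paths:

try:
  FakeRename('/tmp/src.txt', '/tmp/dst.txt')
except OSError, e:
  assert e.errno == errno.EPERM   # "Operation not permitted"
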
+
+
+def FakeGetPlatform():
+  """Fake distutils.util.get_platform on OS/X.  Pass-through otherwise."""
+  if sys.platform == 'darwin':
+    return 'macosx-'
+  else:
+    return distutils.util.get_platform()
+
+
 def IsPathInSubdirectories(filename,
                            subdirectories,
                            normcase=os.path.normcase):
@@ -739,6 +769,21 @@
   return output_dict
 
 
+def GeneratePythonPaths(*p):
+  """Generates all valid filenames for the given file.
+
+  Args:
+    p: Positional args are the folders to the file and finally the file
+       without a suffix.
+
+  Returns:
+    A list of strings giving the path to the file with each import suffix
+      that is valid for this Python build.
+  """
+  suffixes = imp.get_suffixes()
+  return [os.path.join(*p) + s for s, m, t in suffixes]
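
For illustration only: imp.get_suffixes() returns (suffix, mode, type)
tuples, typically covering '.so', '.py' and '.pyc' on a Linux CPython 2.x
build, so a single logical module name expands to every importable file name
(the exact suffixes and separators vary by platform):

GeneratePythonPaths('Crypto', 'Hash', 'MD5')
# -> ['Crypto/Hash/MD5.so', 'Crypto/Hash/MD5module.so',
#     'Crypto/Hash/MD5.py', 'Crypto/Hash/MD5.pyc']
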
+
+
 class FakeFile(file):
   """File sub-class that enforces the security restrictions of the production
   environment.
@@ -771,6 +816,50 @@
 
   ])
 
+  ALLOWED_SITE_PACKAGE_FILES = set(
+    os.path.normcase(os.path.abspath(os.path.join(
+      os.path.dirname(os.__file__), 'site-packages', path)))
+    for path in itertools.chain(*[
+
+      [os.path.join('Crypto')],
+      GeneratePythonPaths('Crypto', '__init__'),
+      [os.path.join('Crypto', 'Cipher')],
+      GeneratePythonPaths('Crypto', 'Cipher', '__init__'),
+      GeneratePythonPaths('Crypto', 'Cipher', 'AES'),
+      GeneratePythonPaths('Crypto', 'Cipher', 'ARC2'),
+      GeneratePythonPaths('Crypto', 'Cipher', 'ARC4'),
+      GeneratePythonPaths('Crypto', 'Cipher', 'Blowfish'),
+      GeneratePythonPaths('Crypto', 'Cipher', 'CAST'),
+      GeneratePythonPaths('Crypto', 'Cipher', 'DES'),
+      GeneratePythonPaths('Crypto', 'Cipher', 'DES3'),
+      GeneratePythonPaths('Crypto', 'Cipher', 'XOR'),
+      [os.path.join('Crypto', 'Hash')],
+      GeneratePythonPaths('Crypto', 'Hash', '__init__'),
+      GeneratePythonPaths('Crypto', 'Hash', 'HMAC'),
+      GeneratePythonPaths('Crypto', 'Hash', 'MD2'),
+      GeneratePythonPaths('Crypto', 'Hash', 'MD4'),
+      GeneratePythonPaths('Crypto', 'Hash', 'MD5'),
+      GeneratePythonPaths('Crypto', 'Hash', 'SHA'),
+      GeneratePythonPaths('Crypto', 'Hash', 'SHA256'),
+      GeneratePythonPaths('Crypto', 'Hash', 'RIPEMD'),
+      [os.path.join('Crypto', 'Protocol')],
+      GeneratePythonPaths('Crypto', 'Protocol', '__init__'),
+      GeneratePythonPaths('Crypto', 'Protocol', 'AllOrNothing'),
+      GeneratePythonPaths('Crypto', 'Protocol', 'Chaffing'),
+      [os.path.join('Crypto', 'PublicKey')],
+      GeneratePythonPaths('Crypto', 'PublicKey', '__init__'),
+      GeneratePythonPaths('Crypto', 'PublicKey', 'DSA'),
+      GeneratePythonPaths('Crypto', 'PublicKey', 'ElGamal'),
+      GeneratePythonPaths('Crypto', 'PublicKey', 'RSA'),
+      GeneratePythonPaths('Crypto', 'PublicKey', 'pubkey'),
+      GeneratePythonPaths('Crypto', 'PublicKey', 'qNEW'),
+      [os.path.join('Crypto', 'Util')],
+      GeneratePythonPaths('Crypto', 'Util', '__init__'),
+      GeneratePythonPaths('Crypto', 'Util', 'RFC1751'),
+      GeneratePythonPaths('Crypto', 'Util', 'number'),
+      GeneratePythonPaths('Crypto', 'Util', 'randpool'),
+  ]))
+
   _original_file = file
 
   _root_path = None
@@ -863,9 +952,6 @@
     """
     logical_filename = normcase(os.path.abspath(filename))
 
-    if os.path.isdir(logical_filename):
-      logical_filename = os.path.join(logical_filename, 'foo')
-
     result = FakeFile._availability_cache.get(logical_filename)
     if result is None:
       result = FakeFile._IsFileAccessibleNoCache(logical_filename,
@@ -886,9 +972,13 @@
     Returns:
       True if the file is accessible, False otherwise.
     """
-    if IsPathInSubdirectories(logical_filename, [FakeFile._root_path],
+    logical_dirfakefile = logical_filename
+    if os.path.isdir(logical_filename):
+      logical_dirfakefile = os.path.join(logical_filename, 'foo')
+
+    if IsPathInSubdirectories(logical_dirfakefile, [FakeFile._root_path],
                               normcase=normcase):
-      relative_filename = logical_filename[len(FakeFile._root_path):]
+      relative_filename = logical_dirfakefile[len(FakeFile._root_path):]
 
       if (not FakeFile._allow_skipped_files and
           FakeFile._skip_files.match(relative_filename)):
@@ -904,16 +994,19 @@
     if logical_filename in FakeFile.ALLOWED_FILES:
       return True
 
-    if IsPathInSubdirectories(logical_filename,
+    if logical_filename in FakeFile.ALLOWED_SITE_PACKAGE_FILES:
+      return True
+
+    if IsPathInSubdirectories(logical_dirfakefile,
                               FakeFile.ALLOWED_SITE_PACKAGE_DIRS,
                               normcase=normcase):
       return True
 
     allowed_dirs = FakeFile._application_paths | FakeFile.ALLOWED_DIRS
-    if (IsPathInSubdirectories(logical_filename,
+    if (IsPathInSubdirectories(logical_dirfakefile,
                                allowed_dirs,
                                normcase=normcase) and
-        not IsPathInSubdirectories(logical_filename,
+        not IsPathInSubdirectories(logical_dirfakefile,
                                    FakeFile.NOT_ALLOWED_DIRS,
                                    normcase=normcase)):
       return True
@@ -926,7 +1019,7 @@
       raise IOError('invalid mode: %s' % mode)
 
     if not FakeFile.IsFileAccessible(filename):
-      raise IOError(errno.EACCES, 'file not accessible')
+      raise IOError(errno.EACCES, 'file not accessible', filename)
 
     super(FakeFile, self).__init__(filename, mode, bufsize, **kwargs)
 
@@ -950,7 +1043,7 @@
     """Enforces access permissions for the function passed to the constructor.
     """
     if not FakeFile.IsFileAccessible(path):
-      raise OSError(errno.EACCES, 'path not accessible')
+      raise OSError(errno.EACCES, 'path not accessible', path)
 
     return self._original_func(path, *args, **kwargs)
 
@@ -1035,6 +1128,31 @@
       print >>sys.stderr, indent + (message % args)
 
   _WHITE_LIST_C_MODULES = [
+    'AES',
+    'ARC2',
+    'ARC4',
+    'Blowfish',
+    'CAST',
+    'DES',
+    'DES3',
+    'MD2',
+    'MD4',
+    'RIPEMD',
+    'SHA256',
+    'XOR',
+
+    '_Crypto_Cipher__AES',
+    '_Crypto_Cipher__ARC2',
+    '_Crypto_Cipher__ARC4',
+    '_Crypto_Cipher__Blowfish',
+    '_Crypto_Cipher__CAST',
+    '_Crypto_Cipher__DES',
+    '_Crypto_Cipher__DES3',
+    '_Crypto_Cipher__XOR',
+    '_Crypto_Hash__MD2',
+    '_Crypto_Hash__MD4',
+    '_Crypto_Hash__RIPEMD',
+    '_Crypto_Hash__SHA256',
     'array',
     'binascii',
     'bz2',
@@ -1089,7 +1207,24 @@
     '__main__',
   ]
 
+  __CRYPTO_CIPHER_ALLOWED_MODULES = [
+    'MODE_CBC',
+    'MODE_CFB',
+    'MODE_CTR',
+    'MODE_ECB',
+    'MODE_OFB',
+    'block_size',
+    'key_size',
+    'new',
+  ]
   _WHITE_LIST_PARTIAL_MODULES = {
+    'Crypto.Cipher.AES': __CRYPTO_CIPHER_ALLOWED_MODULES,
+    'Crypto.Cipher.ARC2': __CRYPTO_CIPHER_ALLOWED_MODULES,
+    'Crypto.Cipher.Blowfish': __CRYPTO_CIPHER_ALLOWED_MODULES,
+    'Crypto.Cipher.CAST': __CRYPTO_CIPHER_ALLOWED_MODULES,
+    'Crypto.Cipher.DES': __CRYPTO_CIPHER_ALLOWED_MODULES,
+    'Crypto.Cipher.DES3': __CRYPTO_CIPHER_ALLOWED_MODULES,
+
     'gc': [
       'enable',
       'disable',
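
As far as the Crypto whitelisting above suggests, only the listed attributes
(new, the MODE_* constants, block_size and key_size) remain available on the
Crypto.Cipher modules under the dev server. The key, IV and message strings
in this sketch are hypothetical 16-byte values:

from Crypto.Cipher import AES

cipher = AES.new('sixteen byte key', AES.MODE_CBC, 'sixteen byte iv.')
ciphertext = cipher.encrypt('sixteen byte msg')   # length must be a multiple
                                                  # of AES.block_size (16)
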
@@ -1149,12 +1284,14 @@
       'O_SYNC',
       'O_TRUNC',
       'O_WRONLY',
+      'open',
       'pardir',
       'path',
       'pathsep',
       'R_OK',
       'readlink',
       'remove',
+      'rename',
       'SEEK_CUR',
       'SEEK_END',
       'SEEK_SET',
@@ -1166,6 +1303,7 @@
       'TMP_MAX',
       'unlink',
       'urandom',
+      'utime',
       'walk',
       'WCOREDUMP',
       'WEXITSTATUS',
@@ -1191,12 +1329,19 @@
       'listdir': RestrictedPathFunction(os.listdir),
 
       'lstat': RestrictedPathFunction(os.stat),
+      'open': FakeOpen,
       'readlink': FakeReadlink,
       'remove': FakeUnlink,
+      'rename': FakeRename,
       'stat': RestrictedPathFunction(os.stat),
       'uname': FakeUname,
       'unlink': FakeUnlink,
       'urandom': FakeURandom,
+      'utime': FakeUTime,
+    },
+
+    'distutils.util': {
+      'get_platform': FakeGetPlatform,
     },
   }
 
@@ -1483,7 +1628,6 @@
       module.__name__ = 'cPickle'
     elif submodule_fullname == 'os':
       module.__dict__.update(self._os.__dict__)
-      self._module_dict['os.path'] = module.path
     elif self.StubModuleExists(submodule_fullname):
       module = self.ImportStubModule(submodule_fullname)
     else:
@@ -1498,6 +1642,12 @@
     if submodule_fullname not in self._module_dict:
       self._module_dict[submodule_fullname] = module
 
+    if submodule_fullname == 'os':
+      os_path_name = module.path.__name__
+      os_path = self.FindAndLoadModule(os_path_name, os_path_name, search_path)
+      self._module_dict['os.path'] = os_path
+      module.__dict__['path'] = os_path
+
     return module
 
   @Trace
@@ -3013,6 +3163,9 @@
     enable_sendmail: Whether to use sendmail as an alternative to SMTP.
     show_mail_body: Whether to log the body of emails.
     remove: Used for dependency injection.
+    trusted: True if this app can access data belonging to other apps.  This
+      behavior is different from the real app server and should be left False
+      except for advanced uses of dev_appserver.
   """
   login_url = config['login_url']
   datastore_path = config['datastore_path']
@@ -3026,6 +3179,7 @@
   enable_sendmail = config.get('enable_sendmail', False)
   show_mail_body = config.get('show_mail_body', False)
   remove = config.get('remove', os.remove)
+  trusted = config.get('trusted', False)
 
   os.environ['APPLICATION_ID'] = app_id
 
@@ -3041,7 +3195,8 @@
   apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
 
   datastore = datastore_file_stub.DatastoreFileStub(
-      app_id, datastore_path, history_path, require_indexes=require_indexes)
+      app_id, datastore_path, history_path, require_indexes=require_indexes,
+      trusted=trusted)
   apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore)
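
For reference, a hedged standalone sketch of the same stub registration with
the new trusted flag enabled (the application id and file paths are invented):

datastore = datastore_file_stub.DatastoreFileStub(
    'myapp', '/tmp/myapp.datastore', '/tmp/myapp.datastore.history',
    require_indexes=False, trusted=True)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore)
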
 
   fixed_login_url = '%s?%s=%%s' % (login_url,
--- a/thirdparty/google_appengine/google/appengine/tools/dev_appserver_login.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/dev_appserver_login.py	Fri Apr 24 14:16:00 2009 +0000
@@ -28,8 +28,9 @@
 """
 
 
+import cgi
 import Cookie
-import cgi
+import md5
 import os
 import sys
 import urllib
@@ -66,8 +67,8 @@
   if cookie_name in cookie:
     cookie_value = cookie[cookie_name].value
 
-  email, admin = (cookie_value.split(':') + ['', ''])[:2]
-  return email, (admin == 'True')
+  email, admin, user_id = (cookie_value.split(':') + ['', '', ''])[:3]
+  return email, (admin == 'True'), user_id
 
 
 def CreateCookieData(email, admin):
@@ -82,7 +83,12 @@
   admin_string = 'False'
   if admin:
     admin_string = 'True'
-  return '%s:%s' % (email, admin_string)
+  if email:
+    user_id_digest = md5.new(email.lower()).digest()
+    user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
+  else:
+    user_id = ''
+  return '%s:%s:%s' % (email, admin_string, user_id)
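
A hedged sketch of the resulting cookie round trip (the email address is
hypothetical; the digits of the id depend on the md5 digest, so they are not
shown literally):

value = CreateCookieData('test@example.com', False)
# value has the form 'test@example.com:False:1<20 decimal digits>', where the
# digits are derived from the md5 of the lowercased email address.
email, admin, user_id = (value.split(':') + ['', '', ''])[:3]
# -> ('test@example.com', 'False', '1...'); the parsing code above turns the
#    middle field into a bool by comparing it against 'True'.
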
 
 
 def SetUserInfoCookie(email, admin, cookie_name=COOKIE_NAME):
--- a/thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/dev_appserver_main.py	Fri Apr 24 14:16:00 2009 +0000
@@ -80,6 +80,7 @@
   """
   global yaml_errors, appcfg, appengine_rpc, dev_appserver, os_compat
   from google.appengine.api import yaml_errors
+  from google.appengine.dist import py_zipimport
   from google.appengine.tools import appcfg
   from google.appengine.tools import appengine_rpc
   from google.appengine.tools import dev_appserver
@@ -110,6 +111,7 @@
 ARG_SMTP_USER = 'smtp_user'
 ARG_STATIC_CACHING = 'static_caching'
 ARG_TEMPLATE_DIR = 'template_dir'
+ARG_TRUSTED = 'trusted'
 
 SDK_PATH = os.path.dirname(
              os.path.dirname(
@@ -142,6 +144,7 @@
   ARG_ADMIN_CONSOLE_HOST: None,
   ARG_ALLOW_SKIPPED_FILES: False,
   ARG_STATIC_CACHING: True,
+  ARG_TRUSTED: False,
 }
 
 API_PATHS = {'1':
@@ -267,6 +270,7 @@
         'smtp_port=',
         'smtp_user=',
         'template_dir=',
+        'trusted',
       ])
   except getopt.GetoptError, e:
     print >>sys.stderr, 'Error: %s' % e
@@ -348,6 +352,9 @@
     if option == '--disable_static_caching':
       option_dict[ARG_STATIC_CACHING] = False
 
+    if option == '--trusted':
+      option_dict[ARG_TRUSTED] = True
+
   return args, option_dict
 
 
@@ -412,7 +419,7 @@
 
   logging.basicConfig(
     level=log_level,
-    format='%(levelname)-8s %(asctime)s %(filename)s] %(message)s')
+    format='%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s] %(message)s')
 
   config = None
   try:
--- a/thirdparty/google_appengine/google/appengine/tools/os_compat.py	Tue Apr 21 16:28:13 2009 +0000
+++ b/thirdparty/google_appengine/google/appengine/tools/os_compat.py	Fri Apr 24 14:16:00 2009 +0000
@@ -42,3 +42,5 @@
 
 
 ERROR_PATH_NOT_FOUND = 3
+ERROR_ACCESS_DENIED = 5
+ERROR_ALREADY_EXISTS = 183
\ No newline at end of file