|
1 #!/usr/bin/env python |
|
2 # |
|
3 # Copyright 2007 Google Inc. |
|
4 # |
|
5 # Licensed under the Apache License, Version 2.0 (the "License"); |
|
6 # you may not use this file except in compliance with the License. |
|
7 # You may obtain a copy of the License at |
|
8 # |
|
9 # http://www.apache.org/licenses/LICENSE-2.0 |
|
10 # |
|
11 # Unless required by applicable law or agreed to in writing, software |
|
12 # distributed under the License is distributed on an "AS IS" BASIS, |
|
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
14 # See the License for the specific language governing permissions and |
|
15 # limitations under the License. |
|
16 # |
|
17 |
|
18 """Memcache API. |
|
19 |
|
20 Provides memcached-alike API to application developers to store |
|
21 data in memory when reliable storage via the DataStore API isn't |
|
22 required and higher performance is desired. |
|
23 """ |
|
24 |
|
25 |
|
26 |
|
27 import cStringIO |
|
28 import math |
|
29 import pickle |
|
30 import types |
|
31 |
|
32 from google.appengine.api import api_base_pb |
|
33 from google.appengine.api import apiproxy_stub_map |
|
34 from google.appengine.api.memcache import memcache_service_pb |
|
35 from google.appengine.runtime import apiproxy_errors |
|
36 |
|
# Convenience aliases for the request/response protocol buffer classes
# used by each memcache RPC below.
MemcacheSetResponse = memcache_service_pb.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb.MemcacheSetRequest

MemcacheGetResponse = memcache_service_pb.MemcacheGetResponse
MemcacheGetRequest = memcache_service_pb.MemcacheGetRequest

MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse
MemcacheDeleteRequest = memcache_service_pb.MemcacheDeleteRequest

MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse
MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest

MemcacheFlushResponse = memcache_service_pb.MemcacheFlushResponse
MemcacheFlushRequest = memcache_service_pb.MemcacheFlushRequest

MemcacheStatsRequest = memcache_service_pb.MemcacheStatsRequest
MemcacheStatsResponse = memcache_service_pb.MemcacheStatsResponse

# Return codes for Client.delete(). Chosen so that only a network failure
# is falsy, letting callers treat the result as a boolean success flag.
DELETE_NETWORK_FAILURE = 0
DELETE_ITEM_MISSING = 1
DELETE_SUCCESSFUL = 2

# Size limits enforced client-side before talking to the server: keys may
# be at most 250 bytes after prefixing and UTF-8 encoding; values at most
# 10^6 bytes after serialization.
MAX_KEY_SIZE = 250
MAX_VALUE_SIZE = 10 ** 6

# Keys of the dictionary returned by Client.get_stats().
# NOTE: STAT_OLDEST_ITEM_AGES is plural but maps to the singular
# 'oldest_item_age' key; the constant name is public API, so it stays.
STAT_HITS = 'hits'
STAT_MISSES = 'misses'
STAT_BYTE_HITS = 'byte_hits'
STAT_ITEMS = 'items'
STAT_BYTES = 'bytes'
STAT_OLDEST_ITEM_AGES = 'oldest_item_age'

# Layout of the per-item flags field: the low three bits encode the stored
# value's original Python type (one of the TYPE_* constants below).
# FLAG_COMPRESSED is reserved for compressed payloads (unused here).
FLAG_TYPE_MASK = 7
FLAG_COMPRESSED = 1 << 3

# Type numbers stored in the flags field so values round-trip as the same
# Python type that was originally stored.
TYPE_STR = 0
TYPE_UNICODE = 1
TYPE_PICKLED = 2
TYPE_INT = 3
TYPE_LONG = 4
|
77 |
|
78 |
|
def _key_string(key, key_prefix='', server_to_user_dict=None):
  """Utility function to handle different ways of requesting keys.

  Args:
    key: Either a string or tuple of (shard_number, string). In Google App
      Engine the sharding is automatic so the shard number is ignored.
      To memcache, the key is just bytes (no defined encoding).
    key_prefix: Optional string prefix to prepend to key.
    server_to_user_dict: Optional dictionary to populate with a mapping of
      server-side key (which includes the key_prefix) to user-supplied key
      (which does not have the prefix).

  Returns:
    The key as a non-unicode string prepended with key_prefix. This is the key
    sent to and stored by the server.

  Raises:
    TypeError: If provided key isn't a string or tuple of (int, string)
      or key_prefix or server_to_user_dict are of the wrong type.
    ValueError: If the key, when translated to the server key, is more than
      250 bytes in length.
  """
  if isinstance(key, tuple):
    key = key[1]
  if not isinstance(key, basestring):
    raise TypeError('Key must be a string instance, received %r' % key)
  if not isinstance(key_prefix, basestring):
    raise TypeError('key_prefix must be a string instance, received %r' %
                    key_prefix)

  server_key = key_prefix + key
  if isinstance(server_key, unicode):
    # Memcache keys are raw bytes; canonicalize unicode keys as UTF-8.
    server_key = server_key.encode('utf-8')

  # The length check is on the encoded server key, so a unicode key may be
  # fewer than MAX_KEY_SIZE characters yet still be rejected.
  if len(server_key) > MAX_KEY_SIZE:
    raise ValueError('Keys may not be more than %d bytes in length, '
                     'received %d bytes' % (MAX_KEY_SIZE, len(server_key)))

  if server_to_user_dict is not None:
    if not isinstance(server_to_user_dict, dict):
      # Bug fix: the message previously interpolated the unrelated 'key'
      # variable instead of the offending server_to_user_dict argument.
      raise TypeError('server_to_user_dict must be a dict instance, '
                      'received %r' % server_to_user_dict)
    server_to_user_dict[server_key] = key

  return server_key
|
124 |
|
125 |
|
def _validate_encode_value(value, do_pickle):
  """Validates a value and encodes it for storage on the memcache server.

  Args:
    value: Value to store in memcache. If it's a string, it will get passed
      along as-is. If it's a unicode string, it will be marked appropriately,
      such that retrievals will yield a unicode value. If it's any other data
      type, this function will attempt to pickle the data and then store the
      serialized result, unpickling it upon retrieval.
    do_pickle: Callable that takes an object and returns a non-unicode
      string containing the pickled object.

  Returns:
    Tuple (stored_value, flags) where:
      stored_value: The value as a non-unicode string that should be stored
        in memcache.
      flags: An integer with bits set from the FLAG_* constants in this file
        to indicate the encoding of the key and value.

  Raises:
    ValueError: If the encoded value is too large.
    pickle.PicklingError: If the value is not a string and could not be pickled.
    RuntimeError: If a complicated data structure could not be pickled due to
      too many levels of recursion in its composition.
  """
  # Order matters below: str must precede unicode, and int must precede
  # long, mirroring how values are decoded on retrieval.
  if isinstance(value, str):
    stored_value, flags = value, TYPE_STR
  elif isinstance(value, unicode):
    stored_value, flags = value.encode('utf-8'), TYPE_UNICODE
  elif isinstance(value, int):
    stored_value, flags = str(value), TYPE_INT
  elif isinstance(value, long):
    stored_value, flags = str(value), TYPE_LONG
  else:
    # Anything else is serialized with the caller-supplied pickler.
    stored_value, flags = do_pickle(value), TYPE_PICKLED

  encoded_size = len(stored_value)
  if encoded_size > MAX_VALUE_SIZE:
    raise ValueError('Values may not be more than %d bytes in length; '
                     'received %d bytes' % (MAX_VALUE_SIZE, encoded_size))

  return (stored_value, flags)
|
175 |
|
176 |
|
def _decode_value(stored_value, flags, do_unpickle):
  """Decodes a value retrieved from memcache back into its original type.

  Args:
    stored_value: The value as a non-unicode string that was stored.
    flags: An integer with bits set from the FLAG_* constants in this file
      that indicate the encoding of the key and value.
    do_unpickle: Callable that takes a non-unicode string object that contains
      a pickled object and returns the pickled object.

  Returns:
    The original object that was stored, be it a normal string, a unicode
    string, int, long, or a Python object that was pickled.

  Raises:
    pickle.UnpicklingError: If the value could not be unpickled.
  """
  assert isinstance(stored_value, str)
  assert isinstance(flags, (int, long))

  # Only the low three bits carry the type; higher bits are other flags.
  type_number = flags & FLAG_TYPE_MASK

  if type_number == TYPE_UNICODE:
    return stored_value.decode('utf-8')
  if type_number == TYPE_PICKLED:
    return do_unpickle(stored_value)
  if type_number == TYPE_INT:
    return int(stored_value)
  if type_number == TYPE_LONG:
    return long(stored_value)
  # The only remaining valid encoding is a plain byte string.
  assert type_number == TYPE_STR, "Unknown stored type"
  return stored_value
|
214 |
|
class Client(object):
  """Memcache client object, through which one invokes all memcache operations.

  Several methods are no-ops to retain source-level compatibility
  with the existing popular Python memcache library.

  Any method that takes a 'key' argument will accept that key as a string
  (unicode or not) or a tuple of (hash_value, string) where the hash_value,
  normally used for sharding onto a memcache instance, is instead ignored, as
  Google App Engine deals with the sharding transparently. Keys in memcache are
  just bytes, without a specified encoding. All such methods may raise TypeError
  if provided a bogus key value and a ValueError if the key is too large.

  Any method that takes a 'value' argument will accept as that value any
  string (unicode or not), int, long, or pickle-able Python object, including
  all native types. You'll get back from the cache the same type that you
  originally put in.
  """

  def __init__(self, servers=None, debug=0,
               pickleProtocol=pickle.HIGHEST_PROTOCOL,
               pickler=pickle.Pickler,
               unpickler=pickle.Unpickler,
               pload=None,
               pid=None,
               make_sync_call=apiproxy_stub_map.MakeSyncCall):
    """Create a new Client object.

    No parameters are required.

    Arguments:
      servers: Ignored; only for compatibility.
      debug: Ignored; only for compatibility.
      pickleProtocol: Pickle protocol to use for pickling the object.
      pickler: pickle.Pickler sub-class to use for pickling.
      unpickler: pickle.Unpickler sub-class to use for unpickling.
      pload: Callable to use for retrieving objects by persistent id.
      pid: Callable to use for determine the persistent id for objects, if any.
      make_sync_call: Function to use to make an App Engine service call.
        Used for testing.
    """
    # One shared buffer plus one Pickler/Unpickler pair are reused for all
    # (de)serialization done by this client; the Do* closures below reset
    # them before each use.
    self._pickle_data = cStringIO.StringIO()
    self._pickler_instance = pickler(self._pickle_data,
                                     protocol=pickleProtocol)
    self._unpickler_instance = unpickler(self._pickle_data)
    if pid is not None:
      self._pickler_instance.persistent_id = pid
    if pload is not None:
      self._unpickler_instance.persistent_load = pload

    def DoPickle(value):
      # Reset the shared buffer and the pickler's memo so state from an
      # earlier pickle can't leak into this one.
      self._pickle_data.truncate(0)
      self._pickler_instance.clear_memo()
      self._pickler_instance.dump(value)
      return self._pickle_data.getvalue()
    self._do_pickle = DoPickle

    def DoUnpickle(value):
      # Replace the shared buffer's contents with the stored bytes, rewind,
      # and clear the unpickler's memo before loading.
      self._pickle_data.truncate(0)
      self._pickle_data.write(value)
      self._pickle_data.seek(0)
      self._unpickler_instance.memo.clear()
      return self._unpickler_instance.load()
    self._do_unpickle = DoUnpickle

    self._make_sync_call = make_sync_call

  def set_servers(self, servers):
    """Sets the pool of memcache servers used by the client.

    This is purely a compatibility method.  In Google App Engine, it's a no-op.
    """
    pass

  def disconnect_all(self):
    """Closes all connections to memcache servers.

    This is purely a compatibility method.  In Google App Engine, it's a no-op.
    """
    pass

  def forget_dead_hosts(self):
    """Resets all servers to the alive status.

    This is purely a compatibility method.  In Google App Engine, it's a no-op.
    """
    pass

  def debuglog(self):
    """Logging function for debugging information.

    This is purely a compatibility method.  In Google App Engine, it's a no-op.
    """
    pass

  def get_stats(self):
    """Gets memcache statistics for this application.

    All of these statistics may reset due to various transient conditions. They
    provide the best information available at the time of being called.

    Returns:
      Dictionary mapping statistic names to associated values. Statistics and
      their associated meanings:

        hits: Number of cache get requests resulting in a cache hit.
        misses: Number of cache get requests resulting in a cache miss.
        byte_hits: Sum of bytes transferred on get requests. Rolls over to
          zero on overflow.
        items: Number of key/value pairs in the cache.
        bytes: Total size of all items in the cache.
        oldest_item_age: How long in seconds since the oldest item in the
          cache was accessed. Effectively, this indicates how long a new
          item will survive in the cache without being accessed. This is
          _not_ the amount of time that has elapsed since the item was
          created.

      On error, returns None.
    """
    request = MemcacheStatsRequest()
    response = MemcacheStatsResponse()
    try:
      self._make_sync_call('memcache', 'Stats', request, response)
    except apiproxy_errors.ApplicationError:
      # Fix: previously bound the exception as an unused 'e', inconsistent
      # with every other RPC handler in this class.
      return None

    if not response.has_stats():
      return None

    stats = response.stats()
    return {
      STAT_HITS: stats.hits(),
      STAT_MISSES: stats.misses(),
      STAT_BYTE_HITS: stats.byte_hits(),
      STAT_ITEMS: stats.items(),
      STAT_BYTES: stats.bytes(),
      STAT_OLDEST_ITEM_AGES: stats.oldest_item_age(),
    }

  def flush_all(self):
    """Deletes everything in memcache.

    Returns:
      True on success, False on RPC or server error.
    """
    request = MemcacheFlushRequest()
    response = MemcacheFlushResponse()
    try:
      self._make_sync_call('memcache', 'FlushAll', request, response)
    except apiproxy_errors.ApplicationError:
      return False
    return True

  def get(self, key):
    """Looks up a single key in memcache.

    If you have multiple items to load, though, it's much more efficient
    to use get_multi() instead, which loads them in one bulk operation,
    reducing the networking latency that'd otherwise be required to do
    many serialized get() operations.

    Args:
      key: The key in memcache to look up.  See docs on Client
        for details of format.

    Returns:
      The value of the key, if found in memcache, else None.
    """
    request = MemcacheGetRequest()
    request.add_key(_key_string(key))
    response = MemcacheGetResponse()
    try:
      self._make_sync_call('memcache', 'Get', request, response)
    except apiproxy_errors.ApplicationError:
      return None

    # A miss and an RPC failure are indistinguishable to the caller: None.
    if not response.item_size():
      return None

    return _decode_value(response.item(0).value(),
                         response.item(0).flags(),
                         self._do_unpickle)

  def get_multi(self, keys, key_prefix=''):
    """Looks up multiple keys from memcache in one operation.

    This is the recommended way to do bulk loads.

    Args:
      keys: List of keys to look up.  Keys may be strings or
        tuples of (hash_value, string).  Google App Engine
        does the sharding and hashing automatically, though, so the hash
        value is ignored.  To memcache, keys are just series of bytes,
        and not in any particular encoding.
      key_prefix: Prefix to prepend to all keys when talking to the server;
        not included in the returned dictionary.

    Returns:
      A dictionary of the keys and values that were present in memcache.
      Even if the key_prefix was specified, that key_prefix won't be on
      the keys in the returned dictionary.
    """
    request = MemcacheGetRequest()
    response = MemcacheGetResponse()
    # user_key maps the server-side (prefixed, encoded) key back to the
    # key the caller supplied, so the result dict uses the caller's keys.
    user_key = {}
    for key in keys:
      request.add_key(_key_string(key, key_prefix, user_key))
    try:
      self._make_sync_call('memcache', 'Get', request, response)
    except apiproxy_errors.ApplicationError:
      return {}

    return_value = {}
    for returned_item in response.item_list():
      value = _decode_value(returned_item.value(), returned_item.flags(),
                            self._do_unpickle)
      return_value[user_key[returned_item.key()]] = value
    return return_value

  def delete(self, key, seconds=0):
    """Deletes a key from memcache.

    Args:
      key: Key to delete.  See docs on Client for details.
      seconds: Optional number of seconds to make deleted items 'locked'
        for 'add' operations. Value can be a delta from current time (up to
        1 month), or an absolute Unix epoch time. Defaults to 0, which means
        items can be immediately added. With or without this option,
        a 'set' operation will always work. Float values will be rounded up to
        the nearest whole second.

    Returns:
      DELETE_NETWORK_FAILURE (0) on network failure,
      DELETE_ITEM_MISSING (1) if the server tried to delete the item but
      didn't have it, or
      DELETE_SUCCESSFUL (2) if the item was actually deleted.
      This can be used as a boolean value, where a network failure is the
      only bad condition.
    """
    if not isinstance(seconds, (int, long, float)):
      raise TypeError('Delete timeout must be a number.')
    if seconds < 0:
      raise ValueError('Delete timeout must be non-negative.')

    request = MemcacheDeleteRequest()
    response = MemcacheDeleteResponse()

    delete_item = request.add_item()
    delete_item.set_key(_key_string(key))
    # Round fractional lock times up so the lock is never shorter than asked.
    delete_item.set_delete_time(int(math.ceil(seconds)))
    try:
      self._make_sync_call('memcache', 'Delete', request, response)
    except apiproxy_errors.ApplicationError:
      return DELETE_NETWORK_FAILURE
    assert response.delete_status_size() == 1, 'Unexpected status size.'

    if response.delete_status(0) == MemcacheDeleteResponse.DELETED:
      return DELETE_SUCCESSFUL
    elif response.delete_status(0) == MemcacheDeleteResponse.NOT_FOUND:
      return DELETE_ITEM_MISSING
    assert False, 'Unexpected deletion status code.'

  def delete_multi(self, keys, seconds=0, key_prefix=''):
    """Delete multiple keys at once.

    Args:
      keys: List of keys to delete.
      seconds: Optional number of seconds to make deleted items 'locked'
        for 'add' operations. Value can be a delta from current time (up to
        1 month), or an absolute Unix epoch time. Defaults to 0, which means
        items can be immediately added. With or without this option,
        a 'set' operation will always work. Float values will be rounded up to
        the nearest whole second.
      key_prefix: Prefix to put on all keys when sending specified
        keys to memcache.  See docs for get_multi() and set_multi().

    Returns:
      True if all operations completed successfully.  False if one
      or more failed to complete.
    """
    if not isinstance(seconds, (int, long, float)):
      raise TypeError('Delete timeout must be a number.')
    if seconds < 0:
      raise ValueError('Delete timeout must not be negative.')

    request = MemcacheDeleteRequest()
    response = MemcacheDeleteResponse()

    for key in keys:
      delete_item = request.add_item()
      delete_item.set_key(_key_string(key, key_prefix=key_prefix))
      delete_item.set_delete_time(int(math.ceil(seconds)))
    try:
      self._make_sync_call('memcache', 'Delete', request, response)
    except apiproxy_errors.ApplicationError:
      return False
    return True

  def set(self, key, value, time=0, min_compress_len=0):
    """Sets a key's value, regardless of previous contents in cache.

    Unlike add() and replace(), this method always sets (or
    overwrites) the value in memcache, regardless of previous
    contents.

    Args:
      key: Key to set.  See docs on Client for details.
      value: Value to set.  Any type.  If complex, will be pickled.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      min_compress_len: Ignored option for compatibility.

    Returns:
      True if set.  False on error.
    """
    return self._set_with_policy(MemcacheSetRequest.SET, key, value, time=time)

  def add(self, key, value, time=0, min_compress_len=0):
    """Sets a key's value, iff item is not already in memcache.

    Args:
      key: Key to set.  See docs on Client for details.
      value: Value to set.  Any type.  If complex, will be pickled.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      min_compress_len: Ignored option for compatibility.

    Returns:
      True if added.  False on error.
    """
    return self._set_with_policy(MemcacheSetRequest.ADD, key, value, time=time)

  def replace(self, key, value, time=0, min_compress_len=0):
    """Replaces a key's value, failing if item isn't already in memcache.

    Args:
      key: Key to set.  See docs on Client for details.
      value: Value to set.  Any type.  If complex, will be pickled.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      min_compress_len: Ignored option for compatibility.

    Returns:
      True if replaced.  False on RPC error or cache miss.
    """
    return self._set_with_policy(MemcacheSetRequest.REPLACE,
                                 key, value, time=time)

  def _set_with_policy(self, policy, key, value, time=0):
    """Sets a single key with a specified policy.

    Helper function for set(), add(), and replace().

    Args:
      policy:  One of MemcacheSetRequest.SET, .ADD, or .REPLACE.
      key: Key to add, set, or replace.  See docs on Client for details.
      value: Value to set.
      time: Expiration time, defaulting to 0 (never expiring).

    Returns:
      True if stored, False on RPC error or policy error, e.g. a replace
      that failed due to the item not already existing, or an add
      failing due to the item not already existing.
    """
    if not isinstance(time, (int, long, float)):
      raise TypeError('Expiration must be a number.')
    if time < 0:
      raise ValueError('Expiration must not be negative.')

    request = MemcacheSetRequest()
    item = request.add_item()
    item.set_key(_key_string(key))
    stored_value, flags = _validate_encode_value(value, self._do_pickle)
    item.set_value(stored_value)
    item.set_flags(flags)
    item.set_set_policy(policy)
    item.set_expiration_time(int(math.ceil(time)))
    response = MemcacheSetResponse()
    try:
      self._make_sync_call('memcache', 'Set', request, response)
    except apiproxy_errors.ApplicationError:
      return False
    if response.set_status_size() != 1:
      return False
    return response.set_status(0) == MemcacheSetResponse.STORED

  def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
    """Set multiple keys' values at once.

    This reduces the network latency of doing many requests in serial.

    Args:
      mapping: Dictionary of keys to values.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      key_prefix: Prefix to prepend to all keys.
      min_compress_len: Unimplemented compatibility option.

    Returns:
      A list of keys whose values were NOT set.  On total success,
      this list should be empty.
    """
    if not isinstance(time, (int, long, float)):
      raise TypeError('Expiration must be a number.')
    if time < 0.0:
      raise ValueError('Expiration must not be negative.')

    request = MemcacheSetRequest()
    # user_key maps server-side keys back to the caller's keys so failures
    # can be reported in the caller's terms.
    user_key = {}
    server_keys = []
    for key, value in mapping.iteritems():
      server_key = _key_string(key, key_prefix, user_key)
      stored_value, flags = _validate_encode_value(value, self._do_pickle)
      server_keys.append(server_key)

      item = request.add_item()
      item.set_key(server_key)
      item.set_value(stored_value)
      item.set_flags(flags)
      item.set_set_policy(MemcacheSetRequest.SET)
      item.set_expiration_time(int(math.ceil(time)))

    response = MemcacheSetResponse()
    try:
      self._make_sync_call('memcache', 'Set', request, response)
    except apiproxy_errors.ApplicationError:
      # Fix: nothing was stored on RPC failure, so report every key as
      # unset.  Previously this returned False, violating the documented
      # list return type.
      return mapping.keys()

    assert response.set_status_size() == len(server_keys)

    unset_list = []
    for server_key, set_status in zip(server_keys, response.set_status_list()):
      if set_status != MemcacheSetResponse.STORED:
        unset_list.append(user_key[server_key])

    return unset_list

  def incr(self, key, delta=1):
    """Atomically increments a key's value.

    Internally, the value is a unsigned 64-bit integer.  Memcache
    doesn't check 64-bit overflows.  The value, if too large, will
    wrap around.

    The key must already exist in the cache to be incremented.  To
    initialize a counter, set() it to the initial value, as an
    ASCII decimal integer.  Future get()s of the key, post-increment,
    will still be an ASCII decimal value.

    Args:
      key: Key to increment.  See Client's docstring for details.
      delta: Non-negative integer value (int or long) to increment key by,
        defaulting to 1.

    Returns:
      New long integer value, or None if key was not in the cache or could not
      be incremented for any other reason.

    Raises:
      ValueError: If number is negative.
      TypeError: If delta isn't an int or long.
    """
    return self._incrdecr(key, False, delta)

  def decr(self, key, delta=1):
    """Atomically decrements a key's value.

    Internally, the value is a unsigned 64-bit integer.  Memcache
    caps decrementing below zero to zero.

    The key must already exist in the cache to be decremented.  See
    docs on incr() for details.

    Args:
      key: Key to decrement.  See Client's docstring for details.
      delta: Non-negative integer value (int or long) to decrement key by,
        defaulting to 1.

    Returns:
      New long integer value, or None if key wasn't in cache and couldn't
      be decremented.

    Raises:
      ValueError: If number is negative.
      TypeError: If delta isn't an int or long.
    """
    return self._incrdecr(key, True, delta)

  def _incrdecr(self, key, is_negative, delta):
    """Increment or decrement a key by a provided delta.

    Args:
      key: Key to increment or decrement.
      is_negative: Boolean, if this is a decrement.
      delta: Non-negative integer amount (int or long) to increment
        or decrement by.

    Returns:
      New long integer value, or None on cache miss.

    Raises:
      ValueError: If delta is negative.
      TypeError: If delta isn't an int or long.
    """
    if not isinstance(delta, (int, long)):
      raise TypeError('Delta must be an integer or long, received %r' % delta)
    if delta < 0:
      raise ValueError('Delta must not be negative.')

    request = MemcacheIncrementRequest()
    response = MemcacheIncrementResponse()
    request.set_key(_key_string(key))
    request.set_delta(delta)
    # Direction is encoded in the request; the server treats a decrement
    # below zero as zero.
    if is_negative:
      request.set_direction(MemcacheIncrementRequest.DECREMENT)
    else:
      request.set_direction(MemcacheIncrementRequest.INCREMENT)

    try:
      self._make_sync_call('memcache', 'Increment', request, response)
    except apiproxy_errors.ApplicationError:
      return None

    if response.has_new_value():
      return response.new_value()
    # Missing new_value means the key wasn't in the cache (or wasn't a
    # number); report a miss.
    return None
|
753 |
|
754 |
|
# Module-level Client instance that backs the module-level convenience
# functions; installed by setup_client() at import time.
_CLIENT = None
|
756 |
|
757 |
|
def setup_client(client_obj):
  """Sets the Client object instance to use for all module-level methods.

  Use this method if you want custom persistent_id() or persistent_load()
  functions associated with your client.

  Args:
    client_obj: Instance of the memcache.Client object.
  """
  global _CLIENT
  var_dict = globals()

  _CLIENT = client_obj
  # Re-export the client's public operations as module-level functions so
  # callers can use memcache.get(...), memcache.set(...), etc. directly.
  for method_name in ('set_servers', 'disconnect_all', 'forget_dead_hosts',
                      'debuglog', 'get', 'get_multi', 'set', 'set_multi',
                      'add', 'replace', 'delete', 'delete_multi', 'incr',
                      'decr', 'flush_all', 'get_stats'):
    var_dict[method_name] = getattr(_CLIENT, method_name)
|
787 |
|
788 |
|
# Install a default Client at import time so the module-level convenience
# functions (get, set, delete, ...) work without explicit setup.
setup_client(Client())