diff --git a/.github/workflows/build_stage.yml b/.github/workflows/build_stage.yml
index dd755e1..650aa0b 100644
--- a/.github/workflows/build_stage.yml
+++ b/.github/workflows/build_stage.yml
@@ -70,9 +70,9 @@ jobs:
           args: --follow-symlinks --delete
         env:
           SOURCE_DIR: infrastructure/cloudformation
-          AWS_REGION: "us-east-1"
-          DEST_DIR: dt/stage/cloudformation
-          AWS_S3_BUCKET: sumu-stacks
+          AWS_REGION: "us-east-2"
+          DEST_DIR: stage/cloudformation
+          AWS_S3_BUCKET: dt-deployment-bucket
           AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
           AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
       - name: Configure AWS Credentials
@@ -80,11 +80,11 @@
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-          aws-region: us-east-1
+          aws-region: us-east-2
       - name: Deploy to AWS CloudFormation
         uses: aws-actions/aws-cloudformation-github-deploy@v1
         with:
           name: dt-infrastructure-stage
           template: infrastructure/cloudformation/dt/top.yaml
           capabilities: "CAPABILITY_NAMED_IAM,CAPABILITY_IAM"
-          parameter-overrides: VpcId=${{ secrets.VPC_ID }},SubDomain=stage.dt,Domain=${{ secrets.DOMAIN }},environment=stage,DockerTag=stage,release=stage,PublicSubnets=${{ secrets.SUBNET_IDS }}
\ No newline at end of file
+          parameter-overrides: VpcId=${{ secrets.VPC_ID }},SubDomain=stage,Domain=${{ secrets.DOMAIN }},environment=stage,DockerTag=stage,release=stage,PublicSubnets=${{ secrets.SUBNET_IDS }}
\ No newline at end of file
diff --git a/.github/workflows/push_dev.yml b/.github/workflows/push_dev.yml
index 50d7c92..c6933de 100644
--- a/.github/workflows/push_dev.yml
+++ b/.github/workflows/push_dev.yml
@@ -19,8 +19,8 @@ jobs:
           args: --follow-symlinks --delete
         env:
           SOURCE_DIR: infrastructure/cloudformation
-          AWS_REGION: "us-east-1"
-          DEST_DIR: dt/develop/cloudformation
-          AWS_S3_BUCKET: sumu-stacks
+          AWS_REGION: "us-east-2"
+          DEST_DIR: develop/cloudformation
+          AWS_S3_BUCKET: dt-deployment-bucket
           AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
           AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
\ No newline at end of file
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index fd65ed5..2f4824e 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -127,9 +127,9 @@ jobs:
           args: --follow-symlinks --delete
         env:
           SOURCE_DIR: infrastructure/cloudformation
-          AWS_REGION: "us-east-1"
-          DEST_DIR: dt/production/cloudformation
-          AWS_S3_BUCKET: sumu-stacks
+          AWS_REGION: "us-east-2"
+          DEST_DIR: production/cloudformation
+          AWS_S3_BUCKET: dt-deployment-bucket
           AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
           AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
       - name: Configure AWS Credentials
@@ -137,7 +137,7 @@
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-          aws-region: us-east-1
+          aws-region: us-east-2
       - name: Deploy to AWS CloudFormation
         uses: aws-actions/aws-cloudformation-github-deploy@v1
         with:
diff --git a/infrastructure/cloudformation/dt/top.yaml b/infrastructure/cloudformation/dt/top.yaml
index 739e1f4..da29840 100644
--- a/infrastructure/cloudformation/dt/top.yaml
+++ b/infrastructure/cloudformation/dt/top.yaml
@@ -61,7 +61,7 @@ Resources:
     Condition: CreateDns
     Type: AWS::CloudFormation::Stack
     Properties:
-      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/sumu-stacks/dt/${release}/cloudformation/dt/dns.yaml'
+      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/dns.yaml'
       Parameters:
         environment: !Ref environment
         Domain: !Ref Domain
@@ -123,7 +123,7 @@ Resources:
   LoadBalancing:
     Type: AWS::CloudFormation::Stack
     Properties:
-      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/sumu-stacks/dt/${release}/cloudformation/dt/load_balancing.yaml'
+      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/load_balancing.yaml'
       Parameters:
         environment: !Ref environment
         release: !Ref release
@@ -133,7 +133,7 @@ Resources:
   EcsCluster:
     Type: AWS::CloudFormation::Stack
     Properties:
-      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/sumu-stacks/dt/${release}/cloudformation/cluster/top.yaml'
+      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/cluster/top.yaml'
       Parameters:
         Environment: !Ref environment
         VpcId: !Ref VpcId
@@ -148,7 +148,7 @@ Resources:
   TaskDefinition:
     Type: AWS::CloudFormation::Stack
     Properties:
-      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/sumu-stacks/dt/${release}/cloudformation/dt/server_task.yaml'
+      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/task.yaml'
       Parameters:
         environment: !Ref environment
         LogGroupName: !Ref LogGroup
diff --git a/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/INSTALLER b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/LICENSE b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/LICENSE
new file mode 100644
index 0000000..29a3fe3
--- /dev/null
+++ b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2012 Andy McCurdy
+
+    Permission is hereby granted, free of charge, to any person
+    obtaining a copy of this software and associated documentation
+    files (the "Software"), to deal in the Software without
+    restriction, including without limitation the rights to use,
+    copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following
+    conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+    OTHER DEALINGS IN THE SOFTWARE.
diff --git a/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/METADATA b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/METADATA new file mode 100644 index 0000000..a97857a --- /dev/null +++ b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/METADATA @@ -0,0 +1,927 @@ +Metadata-Version: 2.1 +Name: redis +Version: 3.5.2 +Summary: Python client for Redis key-value store +Home-page: https://github.com/andymccurdy/redis-py +Author: Andy McCurdy +Author-email: sedrik@gmail.com +Maintainer: Andy McCurdy +Maintainer-email: sedrik@gmail.com +License: MIT +Keywords: Redis,key-value store +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* +Provides-Extra: hiredis +Requires-Dist: hiredis (>=0.1.3) ; extra == 'hiredis' + +redis-py +======== + +The Python interface to the Redis key-value store. + +.. image:: https://secure.travis-ci.org/andymccurdy/redis-py.svg?branch=master + :target: https://travis-ci.org/andymccurdy/redis-py +.. image:: https://readthedocs.org/projects/redis-py/badge/?version=stable&style=flat + :target: https://redis-py.readthedocs.io/en/stable/ +.. image:: https://badge.fury.io/py/redis.svg + :target: https://pypi.org/project/redis/ +.. image:: https://codecov.io/gh/andymccurdy/redis-py/branch/master/graph/badge.svg + :target: https://codecov.io/gh/andymccurdy/redis-py + + +Python 2 Compatibility Note +--------------------------- + +redis-py 3.5.x will be the last version of redis-py that supports Python 2. +The 3.5.x line will continue to get bug fixes and security patches that +support Python 2 until August 1, 2020. redis-py 4.0 will be the next major +version and will require Python 3.5+. + + +Installation +------------ + +redis-py requires a running Redis server. See `Redis's quickstart +`_ for installation instructions. + +redis-py can be installed using `pip` similar to other Python packages. Do not use `sudo` +with `pip`. It is usually good to work in a +`virtualenv `_ or +`venv `_ to avoid conflicts with other package +managers and Python projects. For a quick introduction see +`Python Virtual Environments in Five Minutes `_. + +To install redis-py, simply: + +.. code-block:: bash + + $ pip install redis + +or from source: + +.. code-block:: bash + + $ python setup.py install + + +Getting Started +--------------- + +.. code-block:: pycon + + >>> import redis + >>> r = redis.Redis(host='localhost', port=6379, db=0) + >>> r.set('foo', 'bar') + True + >>> r.get('foo') + b'bar' + +By default, all responses are returned as `bytes` in Python 3 and `str` in +Python 2. The user is responsible for decoding to Python 3 strings or Python 2 +unicode objects. 
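+
+As a quick illustration, a caller can decode manually (a minimal sketch that
+reuses the 'foo' key from the example above):
+
+.. code-block:: pycon
+
+    >>> raw = r.get('foo')
+    >>> raw
+    b'bar'
+    >>> raw.decode('utf-8')
+    'bar'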
+ +If **all** string responses from a client should be decoded, the user can +specify `decode_responses=True` to `Redis.__init__`. In this case, any +Redis command that returns a string type will be decoded with the `encoding` +specified. + + +Upgrading from redis-py 2.X to 3.0 +---------------------------------- + +redis-py 3.0 introduces many new features but required a number of backwards +incompatible changes to be made in the process. This section attempts to +provide an upgrade path for users migrating from 2.X to 3.0. + + +Python Version Support +^^^^^^^^^^^^^^^^^^^^^^ + +redis-py 3.0 supports Python 2.7 and Python 3.5+. + + +Client Classes: Redis and StrictRedis +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +redis-py 3.0 drops support for the legacy "Redis" client class. "StrictRedis" +has been renamed to "Redis" and an alias named "StrictRedis" is provided so +that users previously using "StrictRedis" can continue to run unchanged. + +The 2.X "Redis" class provided alternative implementations of a few commands. +This confused users (rightfully so) and caused a number of support issues. To +make things easier going forward, it was decided to drop support for these +alternate implementations and instead focus on a single client class. + +2.X users that are already using StrictRedis don't have to change the class +name. StrictRedis will continue to work for the foreseeable future. + +2.X users that are using the Redis class will have to make changes if they +use any of the following commands: + +* SETEX: The argument order has changed. The new order is (name, time, value). +* LREM: The argument order has changed. The new order is (name, num, value). +* TTL and PTTL: The return value is now always an int and matches the + official Redis command (>0 indicates the timeout, -1 indicates that the key + exists but that it has no expire time set, -2 indicates that the key does + not exist) + + +SSL Connections +^^^^^^^^^^^^^^^ + +redis-py 3.0 changes the default value of the `ssl_cert_reqs` option from +`None` to `'required'`. See +`Issue 1016 `_. This +change enforces hostname validation when accepting a cert from a remote SSL +terminator. If the terminator doesn't properly set the hostname on the cert +this will cause redis-py 3.0 to raise a ConnectionError. + +This check can be disabled by setting `ssl_cert_reqs` to `None`. Note that +doing so removes the security check. Do so at your own risk. + +It has been reported that SSL certs received from AWS ElastiCache do not have +proper hostnames and turning off hostname verification is currently required. + + +MSET, MSETNX and ZADD +^^^^^^^^^^^^^^^^^^^^^ + +These commands all accept a mapping of key/value pairs. In redis-py 2.X +this mapping could be specified as ``*args`` or as ``**kwargs``. Both of these +styles caused issues when Redis introduced optional flags to ZADD. Relying on +``*args`` caused issues with the optional argument order, especially in Python +2.7. Relying on ``**kwargs`` caused potential collision issues of user keys with +the argument names in the method signature. + +To resolve this, redis-py 3.0 has changed these three commands to all accept +a single positional argument named mapping that is expected to be a dict. For +MSET and MSETNX, the dict is a mapping of key-names -> values. For ZADD, the +dict is a mapping of element-names -> score. + +MSET, MSETNX and ZADD now look like: + +.. 
code-block:: python + + def mset(self, mapping): + def msetnx(self, mapping): + def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False): + +All 2.X users that use these commands must modify their code to supply +keys and values as a dict to these commands. + + +ZINCRBY +^^^^^^^ + +redis-py 2.X accidentally modified the argument order of ZINCRBY, swapping the +order of value and amount. ZINCRBY now looks like: + +.. code-block:: python + + def zincrby(self, name, amount, value): + +All 2.X users that rely on ZINCRBY must swap the order of amount and value +for the command to continue to work as intended. + + +Encoding of User Input +^^^^^^^^^^^^^^^^^^^^^^ + +redis-py 3.0 only accepts user data as bytes, strings or numbers (ints, longs +and floats). Attempting to specify a key or a value as any other type will +raise a DataError exception. + +redis-py 2.X attempted to coerce any type of input into a string. While +occasionally convenient, this caused all sorts of hidden errors when users +passed boolean values (which were coerced to 'True' or 'False'), a None +value (which was coerced to 'None') or other values, such as user defined +types. + +All 2.X users should make sure that the keys and values they pass into +redis-py are either bytes, strings or numbers. + + +Locks +^^^^^ + +redis-py 3.0 drops support for the pipeline-based Lock and now only supports +the Lua-based lock. In doing so, LuaLock has been renamed to Lock. This also +means that redis-py Lock objects require Redis server 2.6 or greater. + +2.X users that were explicitly referring to "LuaLock" will have to now refer +to "Lock" instead. + + +Locks as Context Managers +^^^^^^^^^^^^^^^^^^^^^^^^^ + +redis-py 3.0 now raises a LockError when using a lock as a context manager and +the lock cannot be acquired within the specified timeout. This is more of a +bug fix than a backwards incompatible change. However, given an error is now +raised where none was before, this might alarm some users. + +2.X users should make sure they're wrapping their lock code in a try/catch +like this: + +.. code-block:: python + + try: + with r.lock('my-lock-key', blocking_timeout=5) as lock: + # code you want executed only after the lock has been acquired + except LockError: + # the lock wasn't acquired + + +API Reference +------------- + +The `official Redis command documentation `_ does a +great job of explaining each command in detail. redis-py attempts to adhere +to the official command syntax. There are a few exceptions: + +* **SELECT**: Not implemented. See the explanation in the Thread Safety section + below. +* **DEL**: 'del' is a reserved keyword in the Python syntax. Therefore redis-py + uses 'delete' instead. +* **MULTI/EXEC**: These are implemented as part of the Pipeline class. The + pipeline is wrapped with the MULTI and EXEC statements by default when it + is executed, which can be disabled by specifying transaction=False. + See more about Pipelines below. +* **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as a separate + class as it places the underlying connection in a state where it can't + execute non-pubsub commands. Calling the pubsub method from the Redis client + will return a PubSub instance where you can subscribe to channels and listen + for messages. You can only call PUBLISH from the Redis client (see + `this comment on issue #151 + `_ + for details). +* **SCAN/SSCAN/HSCAN/ZSCAN**: The \*SCAN commands are implemented as they + exist in the Redis documentation. 
In addition, each command has an equivalent + iterator method. These are purely for convenience so the user doesn't have + to keep track of the cursor while iterating. Use the + scan_iter/sscan_iter/hscan_iter/zscan_iter methods for this behavior. + + +More Detail +----------- + +Connection Pools +^^^^^^^^^^^^^^^^ + +Behind the scenes, redis-py uses a connection pool to manage connections to +a Redis server. By default, each Redis instance you create will in turn create +its own connection pool. You can override this behavior and use an existing +connection pool by passing an already created connection pool instance to the +connection_pool argument of the Redis class. You may choose to do this in order +to implement client side sharding or have fine-grain control of how +connections are managed. + +.. code-block:: pycon + + >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0) + >>> r = redis.Redis(connection_pool=pool) + +Connections +^^^^^^^^^^^ + +ConnectionPools manage a set of Connection instances. redis-py ships with two +types of Connections. The default, Connection, is a normal TCP socket based +connection. The UnixDomainSocketConnection allows for clients running on the +same device as the server to connect via a unix domain socket. To use a +UnixDomainSocketConnection connection, simply pass the unix_socket_path +argument, which is a string to the unix domain socket file. Additionally, make +sure the unixsocket parameter is defined in your redis.conf file. It's +commented out by default. + +.. code-block:: pycon + + >>> r = redis.Redis(unix_socket_path='/tmp/redis.sock') + +You can create your own Connection subclasses as well. This may be useful if +you want to control the socket behavior within an async framework. To +instantiate a client class using your own connection, you need to create +a connection pool, passing your class to the connection_class argument. +Other keyword parameters you pass to the pool will be passed to the class +specified during initialization. + +.. code-block:: pycon + + >>> pool = redis.ConnectionPool(connection_class=YourConnectionClass, + your_arg='...', ...) + +Connections maintain an open socket to the Redis server. Sometimes these +sockets are interrupted or disconnected for a variety of reasons. For example, +network appliances, load balancers and other services that sit between clients +and servers are often configured to kill connections that remain idle for a +given threshold. + +When a connection becomes disconnected, the next command issued on that +connection will fail and redis-py will raise a ConnectionError to the caller. +This allows each application that uses redis-py to handle errors in a way +that's fitting for that specific application. However, constant error +handling can be verbose and cumbersome, especially when socket disconnections +happen frequently in many production environments. + +To combat this, redis-py can issue regular health checks to assess the +liveliness of a connection just before issuing a command. Users can pass +``health_check_interval=N`` to the Redis or ConnectionPool classes or +as a query argument within a Redis URL. The value of ``health_check_interval`` +must be an integer. A value of ``0``, the default, disables health checks. +Any positive integer will enable health checks. Health checks are performed +just before a command is executed if the underlying connection has been idle +for more than ``health_check_interval`` seconds. 
+For example,
+``health_check_interval=30`` will ensure that a health check is run on any
+connection that has been idle for 30 or more seconds just before a command
+is executed on that connection.
+
+If your application is running in an environment that disconnects idle
+connections after 30 seconds, you should set the ``health_check_interval``
+option to a value less than 30.
+
+This option also works on any PubSub connection that is created from a
+client with ``health_check_interval`` enabled. PubSub users need to ensure
+that ``get_message()`` or ``listen()`` are called more frequently than
+``health_check_interval`` seconds. It is assumed that most workloads already
+do this.
+
+If your PubSub use case doesn't call ``get_message()`` or ``listen()``
+frequently, you should call ``pubsub.check_health()`` explicitly on a
+regular basis.
+
+Parsers
+^^^^^^^
+
+Parser classes provide a way to control how responses from the Redis server
+are parsed. redis-py ships with two parser classes, the PythonParser and the
+HiredisParser. By default, redis-py will attempt to use the HiredisParser if
+you have the hiredis module installed and will fall back to the PythonParser
+otherwise.
+
+Hiredis is a C library maintained by the core Redis team. Pieter Noordhuis was
+kind enough to create Python bindings. Using Hiredis can provide up to a
+10x speed improvement in parsing responses from the Redis server. The
+performance increase is most noticeable when retrieving many pieces of data,
+such as from LRANGE or SMEMBERS operations.
+
+Hiredis is available on PyPI, and can be installed via pip just like redis-py.
+
+.. code-block:: bash
+
+    $ pip install hiredis
+
+Response Callbacks
+^^^^^^^^^^^^^^^^^^
+
+The client class uses a set of callbacks to cast Redis responses to the
+appropriate Python type. There are a number of these callbacks defined on
+the Redis client class in a dictionary called RESPONSE_CALLBACKS.
+
+Custom callbacks can be added on a per-instance basis using the
+set_response_callback method. This method accepts two arguments: a command
+name and the callback. Callbacks added in this manner are only valid on the
+instance the callback is added to. If you want to define or override a callback
+globally, you should make a subclass of the Redis client and add your callback
+to its RESPONSE_CALLBACKS class dictionary.
+
+Response callbacks take at least one parameter: the response from the Redis
+server. Keyword arguments may also be accepted in order to further control
+how to interpret the response. These keyword arguments are specified during the
+command's call to execute_command. The ZRANGE implementation demonstrates the
+use of response callback keyword arguments with its "withscores" argument.
+
+Thread Safety
+^^^^^^^^^^^^^
+
+Redis client instances can safely be shared between threads. Internally,
+connection instances are only retrieved from the connection pool during
+command execution, and returned to the pool directly after. Command execution
+never modifies state on the client instance.
+
+However, there is one caveat: the Redis SELECT command. The SELECT command
+allows you to switch the database currently in use by the connection. That
+database remains selected until another is selected or until the connection is
+closed. This creates an issue in that connections could be returned to the pool
+that are connected to a different database.
+
+As a result, redis-py does not implement the SELECT command on client
+instances.
If you use multiple Redis databases within the same application, you +should create a separate client instance (and possibly a separate connection +pool) for each database. + +It is not safe to pass PubSub or Pipeline objects between threads. + +Pipelines +^^^^^^^^^ + +Pipelines are a subclass of the base Redis class that provide support for +buffering multiple commands to the server in a single request. They can be used +to dramatically increase the performance of groups of commands by reducing the +number of back-and-forth TCP packets between the client and server. + +Pipelines are quite simple to use: + +.. code-block:: pycon + + >>> r = redis.Redis(...) + >>> r.set('bing', 'baz') + >>> # Use the pipeline() method to create a pipeline instance + >>> pipe = r.pipeline() + >>> # The following SET commands are buffered + >>> pipe.set('foo', 'bar') + >>> pipe.get('bing') + >>> # the EXECUTE call sends all buffered commands to the server, returning + >>> # a list of responses, one for each command. + >>> pipe.execute() + [True, b'baz'] + +For ease of use, all commands being buffered into the pipeline return the +pipeline object itself. Therefore calls can be chained like: + +.. code-block:: pycon + + >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute() + [True, True, 6] + +In addition, pipelines can also ensure the buffered commands are executed +atomically as a group. This happens by default. If you want to disable the +atomic nature of a pipeline but still want to buffer commands, you can turn +off transactions. + +.. code-block:: pycon + + >>> pipe = r.pipeline(transaction=False) + +A common issue occurs when requiring atomic transactions but needing to +retrieve values in Redis prior for use within the transaction. For instance, +let's assume that the INCR command didn't exist and we need to build an atomic +version of INCR in Python. + +The completely naive implementation could GET the value, increment it in +Python, and SET the new value back. However, this is not atomic because +multiple clients could be doing this at the same time, each getting the same +value from GET. + +Enter the WATCH command. WATCH provides the ability to monitor one or more keys +prior to starting a transaction. If any of those keys change prior the +execution of that transaction, the entire transaction will be canceled and a +WatchError will be raised. To implement our own client-side INCR command, we +could do something like this: + +.. code-block:: pycon + + >>> with r.pipeline() as pipe: + ... while True: + ... try: + ... # put a WATCH on the key that holds our sequence value + ... pipe.watch('OUR-SEQUENCE-KEY') + ... # after WATCHing, the pipeline is put into immediate execution + ... # mode until we tell it to start buffering commands again. + ... # this allows us to get the current value of our sequence + ... current_value = pipe.get('OUR-SEQUENCE-KEY') + ... next_value = int(current_value) + 1 + ... # now we can put the pipeline back into buffered mode with MULTI + ... pipe.multi() + ... pipe.set('OUR-SEQUENCE-KEY', next_value) + ... # and finally, execute the pipeline (the set command) + ... pipe.execute() + ... # if a WatchError wasn't raised during execution, everything + ... # we just did happened atomically. + ... break + ... except WatchError: + ... # another client must have changed 'OUR-SEQUENCE-KEY' between + ... # the time we started WATCHing it and the pipeline's execution. + ... # our best bet is to just retry. + ... 
continue + +Note that, because the Pipeline must bind to a single connection for the +duration of a WATCH, care must be taken to ensure that the connection is +returned to the connection pool by calling the reset() method. If the +Pipeline is used as a context manager (as in the example above) reset() +will be called automatically. Of course you can do this the manual way by +explicitly calling reset(): + +.. code-block:: pycon + + >>> pipe = r.pipeline() + >>> while True: + ... try: + ... pipe.watch('OUR-SEQUENCE-KEY') + ... ... + ... pipe.execute() + ... break + ... except WatchError: + ... continue + ... finally: + ... pipe.reset() + +A convenience method named "transaction" exists for handling all the +boilerplate of handling and retrying watch errors. It takes a callable that +should expect a single parameter, a pipeline object, and any number of keys to +be WATCHed. Our client-side INCR command above can be written like this, +which is much easier to read: + +.. code-block:: pycon + + >>> def client_side_incr(pipe): + ... current_value = pipe.get('OUR-SEQUENCE-KEY') + ... next_value = int(current_value) + 1 + ... pipe.multi() + ... pipe.set('OUR-SEQUENCE-KEY', next_value) + >>> + >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY') + [True] + +Be sure to call `pipe.multi()` in the callable passed to `Redis.transaction` +prior to any write commands. + +Publish / Subscribe +^^^^^^^^^^^^^^^^^^^ + +redis-py includes a `PubSub` object that subscribes to channels and listens +for new messages. Creating a `PubSub` object is easy. + +.. code-block:: pycon + + >>> r = redis.Redis(...) + >>> p = r.pubsub() + +Once a `PubSub` instance is created, channels and patterns can be subscribed +to. + +.. code-block:: pycon + + >>> p.subscribe('my-first-channel', 'my-second-channel', ...) + >>> p.psubscribe('my-*', ...) + +The `PubSub` instance is now subscribed to those channels/patterns. The +subscription confirmations can be seen by reading messages from the `PubSub` +instance. + +.. code-block:: pycon + + >>> p.get_message() + {'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1} + >>> p.get_message() + {'pattern': None, 'type': 'subscribe', 'channel': b'my-first-channel', 'data': 2} + >>> p.get_message() + {'pattern': None, 'type': 'psubscribe', 'channel': b'my-*', 'data': 3} + +Every message read from a `PubSub` instance will be a dictionary with the +following keys. + +* **type**: One of the following: 'subscribe', 'unsubscribe', 'psubscribe', + 'punsubscribe', 'message', 'pmessage' +* **channel**: The channel [un]subscribed to or the channel a message was + published to +* **pattern**: The pattern that matched a published message's channel. Will be + `None` in all cases except for 'pmessage' types. +* **data**: The message data. With [un]subscribe messages, this value will be + the number of channels and patterns the connection is currently subscribed + to. With [p]message messages, this value will be the actual published + message. + +Let's send a message now. + +.. code-block:: pycon + + # the publish method returns the number matching channel and pattern + # subscriptions. 
+    # 'my-first-channel' matches both the 'my-first-channel'
+    # subscription and the 'my-*' pattern subscription, so this message will
+    # be delivered to 2 channels/patterns
+    >>> r.publish('my-first-channel', 'some data')
+    2
+    >>> p.get_message()
+    {'channel': b'my-first-channel', 'data': b'some data', 'pattern': None, 'type': 'message'}
+    >>> p.get_message()
+    {'channel': b'my-first-channel', 'data': b'some data', 'pattern': b'my-*', 'type': 'pmessage'}
+
+Unsubscribing works just like subscribing. If no arguments are passed to
+[p]unsubscribe, all channels or patterns will be unsubscribed from.
+
+.. code-block:: pycon
+
+    >>> p.unsubscribe()
+    >>> p.punsubscribe('my-*')
+    >>> p.get_message()
+    {'channel': b'my-second-channel', 'data': 2, 'pattern': None, 'type': 'unsubscribe'}
+    >>> p.get_message()
+    {'channel': b'my-first-channel', 'data': 1, 'pattern': None, 'type': 'unsubscribe'}
+    >>> p.get_message()
+    {'channel': b'my-*', 'data': 0, 'pattern': None, 'type': 'punsubscribe'}
+
+redis-py also allows you to register callback functions to handle published
+messages. Message handlers take a single argument, the message, which is a
+dictionary just like the examples above. To subscribe to a channel or pattern
+with a message handler, pass the channel or pattern name as a keyword argument
+with its value being the callback function.
+
+When a message is read on a channel or pattern with a message handler, the
+message dictionary is created and passed to the message handler. In this case,
+a `None` value is returned from get_message() since the message was already
+handled.
+
+.. code-block:: pycon
+
+    >>> def my_handler(message):
+    ...     print('MY HANDLER: ', message['data'])
+    >>> p.subscribe(**{'my-channel': my_handler})
+    # read the subscribe confirmation message
+    >>> p.get_message()
+    {'pattern': None, 'type': 'subscribe', 'channel': b'my-channel', 'data': 1}
+    >>> r.publish('my-channel', 'awesome data')
+    1
+    # for the message handler to work, we need to tell the instance to read
+    # data. this can be done in several ways (read more below). we'll just use
+    # the familiar get_message() function for now
+    >>> message = p.get_message()
+    MY HANDLER: awesome data
+    # note here that the my_handler callback printed the string above.
+    # `message` is None because the message was handled by our handler.
+    >>> print(message)
+    None
+
+If your application is not interested in the (sometimes noisy)
+subscribe/unsubscribe confirmation messages, you can ignore them by passing
+`ignore_subscribe_messages=True` to `r.pubsub()`. This will cause all
+subscribe/unsubscribe messages to be read, but they won't bubble up to your
+application.
+
+.. code-block:: pycon
+
+    >>> p = r.pubsub(ignore_subscribe_messages=True)
+    >>> p.subscribe('my-channel')
+    >>> p.get_message()  # hides the subscribe message and returns None
+    >>> r.publish('my-channel', 'my data')
+    1
+    >>> p.get_message()
+    {'channel': b'my-channel', 'data': b'my data', 'pattern': None, 'type': 'message'}
+
+There are three different strategies for reading messages.
+
+The examples above have been using `pubsub.get_message()`. Behind the scenes,
+`get_message()` uses the system's 'select' module to quickly poll the
+connection's socket. If there's data available to be read, `get_message()` will
+read it, format the message and return it or pass it to a message handler. If
+there's no data to be read, `get_message()` will immediately return None. This
+makes it trivial to integrate into an existing event loop inside your
+application.
+
+.. code-block:: pycon
+
+    >>> while True:
+    >>>     message = p.get_message()
+    >>>     if message:
+    >>>         # do something with the message
+    >>>     time.sleep(0.001)  # be nice to the system :)
+
+Older versions of redis-py only read messages with `pubsub.listen()`. listen()
+is a generator that blocks until a message is available. If your application
+doesn't need to do anything else but receive and act on messages received from
+redis, listen() is an easy way to get up and running.
+
+.. code-block:: pycon
+
+    >>> for message in p.listen():
+    ...     # do something with the message
+
+The third option runs an event loop in a separate thread.
+`pubsub.run_in_thread()` creates a new thread and starts the event loop. The
+thread object is returned to the caller of `run_in_thread()`. The caller can
+use the `thread.stop()` method to shut down the event loop and thread. Behind
+the scenes, this is simply a wrapper around `get_message()` that runs in a
+separate thread, essentially creating a tiny non-blocking event loop for you.
+`run_in_thread()` takes an optional `sleep_time` argument. If specified, the
+event loop will call `time.sleep()` with the value in each iteration of the
+loop.
+
+Note: Since we're running in a separate thread, there's no way to handle
+messages that aren't automatically handled with registered message handlers.
+Therefore, redis-py prevents you from calling `run_in_thread()` if you're
+subscribed to patterns or channels that don't have message handlers attached.
+
+.. code-block:: pycon
+
+    >>> p.subscribe(**{'my-channel': my_handler})
+    >>> thread = p.run_in_thread(sleep_time=0.001)
+    # the event loop is now running in the background processing messages
+    # when it's time to shut it down...
+    >>> thread.stop()
+
+A PubSub object adheres to the same encoding semantics as the client instance
+it was created from. Any channel or pattern that's unicode will be encoded
+using the `charset` specified on the client before being sent to Redis. If the
+client's `decode_responses` flag is set to False (the default), the 'channel',
+'pattern' and 'data' values in message dictionaries will be byte strings (str
+on Python 2, bytes on Python 3). If the client's `decode_responses` is True,
+then the 'channel', 'pattern' and 'data' values will be automatically decoded
+to unicode strings using the client's `charset`.
+
+PubSub objects remember what channels and patterns they are subscribed to. In
+the event of a disconnection such as a network error or timeout, the PubSub
+object will re-subscribe to all prior channels and patterns when reconnecting.
+Messages that were published while the client was disconnected cannot be
+delivered. When you're finished with a PubSub object, call its `.close()`
+method to shut down the connection.
+
+.. code-block:: pycon
+
+    >>> p = r.pubsub()
+    >>> ...
+    >>> p.close()
+
+
+The PUBSUB set of subcommands CHANNELS, NUMSUB and NUMPAT are also
+supported:
+
+.. code-block:: pycon
+
+    >>> r.pubsub_channels()
+    [b'foo', b'bar']
+    >>> r.pubsub_numsub('foo', 'bar')
+    [(b'foo', 9001), (b'bar', 42)]
+    >>> r.pubsub_numsub('baz')
+    [(b'baz', 0)]
+    >>> r.pubsub_numpat()
+    1204
+
+Monitor
+^^^^^^^
+redis-py includes a `Monitor` object that streams every command processed
+by the Redis server. Use `listen()` on the `Monitor` object to block
+until a command is received.
+
+.. code-block:: pycon
+
+    >>> r = redis.Redis(...)
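+    # note: MONITOR echoes every command the server processes, which can
+    # noticeably reduce server throughput; best reserved for debugging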
+ >>> with r.monitor() as m: + >>> for command in m.listen(): + >>> print(command) + +Lua Scripting +^^^^^^^^^^^^^ + +redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there are +a number of edge cases that make these commands tedious to use in real world +scenarios. Therefore, redis-py exposes a Script object that makes scripting +much easier to use. + +To create a Script instance, use the `register_script` function on a client +instance passing the Lua code as the first argument. `register_script` returns +a Script instance that you can use throughout your code. + +The following trivial Lua script accepts two parameters: the name of a key and +a multiplier value. The script fetches the value stored in the key, multiplies +it with the multiplier value and returns the result. + +.. code-block:: pycon + + >>> r = redis.Redis() + >>> lua = """ + ... local value = redis.call('GET', KEYS[1]) + ... value = tonumber(value) + ... return value * ARGV[1]""" + >>> multiply = r.register_script(lua) + +`multiply` is now a Script instance that is invoked by calling it like a +function. Script instances accept the following optional arguments: + +* **keys**: A list of key names that the script will access. This becomes the + KEYS list in Lua. +* **args**: A list of argument values. This becomes the ARGV list in Lua. +* **client**: A redis-py Client or Pipeline instance that will invoke the + script. If client isn't specified, the client that initially + created the Script instance (the one that `register_script` was + invoked from) will be used. + +Continuing the example from above: + +.. code-block:: pycon + + >>> r.set('foo', 2) + >>> multiply(keys=['foo'], args=[5]) + 10 + +The value of key 'foo' is set to 2. When multiply is invoked, the 'foo' key is +passed to the script along with the multiplier value of 5. Lua executes the +script and returns the result, 10. + +Script instances can be executed using a different client instance, even one +that points to a completely different Redis server. + +.. code-block:: pycon + + >>> r2 = redis.Redis('redis2.example.com') + >>> r2.set('foo', 3) + >>> multiply(keys=['foo'], args=[5], client=r2) + 15 + +The Script object ensures that the Lua script is loaded into Redis's script +cache. In the event of a NOSCRIPT error, it will load the script and retry +executing it. + +Script objects can also be used in pipelines. The pipeline instance should be +passed as the client argument when calling the script. Care is taken to ensure +that the script is registered in Redis's script cache just prior to pipeline +execution. + +.. code-block:: pycon + + >>> pipe = r.pipeline() + >>> pipe.set('foo', 5) + >>> multiply(keys=['foo'], args=[5], client=pipe) + >>> pipe.execute() + [True, 25] + +Sentinel support +^^^^^^^^^^^^^^^^ + +redis-py can be used together with `Redis Sentinel `_ +to discover Redis nodes. You need to have at least one Sentinel daemon running +in order to use redis-py's Sentinel support. + +Connecting redis-py to the Sentinel instance(s) is easy. You can use a +Sentinel connection to discover the master and slaves network addresses: + +.. code-block:: pycon + + >>> from redis.sentinel import Sentinel + >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) + >>> sentinel.discover_master('mymaster') + ('127.0.0.1', 6379) + >>> sentinel.discover_slaves('mymaster') + [('127.0.0.1', 6380)] + +You can also create Redis client connections from a Sentinel instance. 
You can +connect to either the master (for write operations) or a slave (for read-only +operations). + +.. code-block:: pycon + + >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) + >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1) + >>> master.set('foo', 'bar') + >>> slave.get('foo') + b'bar' + +The master and slave objects are normal Redis instances with their +connection pool bound to the Sentinel instance. When a Sentinel backed client +attempts to establish a connection, it first queries the Sentinel servers to +determine an appropriate host to connect to. If no server is found, +a MasterNotFoundError or SlaveNotFoundError is raised. Both exceptions are +subclasses of ConnectionError. + +When trying to connect to a slave client, the Sentinel connection pool will +iterate over the list of slaves until it finds one that can be connected to. +If no slaves can be connected to, a connection will be established with the +master. + +See `Guidelines for Redis clients with support for Redis Sentinel +`_ to learn more about Redis Sentinel. + +Scan Iterators +^^^^^^^^^^^^^^ + +The \*SCAN commands introduced in Redis 2.8 can be cumbersome to use. While +these commands are fully supported, redis-py also exposes the following methods +that return Python iterators for convenience: `scan_iter`, `hscan_iter`, +`sscan_iter` and `zscan_iter`. + +.. code-block:: pycon + + >>> for key, value in (('A', '1'), ('B', '2'), ('C', '3')): + ... r.set(key, value) + >>> for key in r.scan_iter(): + ... print(key, r.get(key)) + A 1 + B 2 + C 3 + +Author +^^^^^^ + +redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com). +It can be found here: https://github.com/andymccurdy/redis-py + +Special thanks to: + +* Ludovico Magnocavallo, author of the original Python Redis client, from + which some of the socket code is still used. +* Alexander Solovyov for ideas on the generic response callback system. +* Paul Hubbard for initial packaging support. 
+ + diff --git a/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/RECORD b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/RECORD new file mode 100644 index 0000000..2b1b8bc --- /dev/null +++ b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/RECORD @@ -0,0 +1,22 @@ +redis-3.5.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +redis-3.5.2.dist-info/LICENSE,sha256=eQFI2MEvijiycHp0viNDMWutEmmV_1SAGhgbiyMboSQ,1074 +redis-3.5.2.dist-info/METADATA,sha256=1S43bhBSoRk6JkBbRF2FaUDzr48m32yHicEy6hrjZLw,36674 +redis-3.5.2.dist-info/RECORD,, +redis-3.5.2.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110 +redis-3.5.2.dist-info/top_level.txt,sha256=OMAefszlde6ZoOtlM35AWzpRIrwtcqAMHGlRit-w2-4,6 +redis/__init__.py,sha256=U3eh1OAZ87NT6pppHLMWmApe8_2YoOMj7sy1N8m3dT4,1209 +redis/__pycache__/__init__.cpython-37.pyc,, +redis/__pycache__/_compat.cpython-37.pyc,, +redis/__pycache__/client.cpython-37.pyc,, +redis/__pycache__/connection.cpython-37.pyc,, +redis/__pycache__/exceptions.cpython-37.pyc,, +redis/__pycache__/lock.cpython-37.pyc,, +redis/__pycache__/sentinel.cpython-37.pyc,, +redis/__pycache__/utils.cpython-37.pyc,, +redis/_compat.py,sha256=opM78DdCy4D86p9cpN_O81yNgjVDUwOJGLtMS4LL9-0,5698 +redis/client.py,sha256=O5zjv95LO7_TnsPfaTHXNvqNiMqzs_1wD59CI7UBeHk,159479 +redis/connection.py,sha256=MXU__pk5cWt5OAaLZzoTBezHtq2SBksT97HIK3iCb4U,54481 +redis/exceptions.py,sha256=phjjyJjnebrM82XDzfjtreGnkWIoSNfDZiyoWs3_zQE,1341 +redis/lock.py,sha256=VNfWNN46FBwhcPUnFmzC8N8uLuxCsu2YT2drkEzM6_U,11349 +redis/sentinel.py,sha256=IKzrrtgzbjVvI7r50DwKW3pK_yoNIBkLiKskYsOm5_M,11359 +redis/utils.py,sha256=wG1Ws79_HgIzAALwYwK4CrVLLloVTRPRqjo1gxF4U7U,674 diff --git a/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/WHEEL b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/WHEEL new file mode 100644 index 0000000..ef99c6c --- /dev/null +++ b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/top_level.txt b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/top_level.txt new file mode 100644 index 0000000..7800f0f --- /dev/null +++ b/infrastructure/lambda/task_queue_manager/redis-3.5.2.dist-info/top_level.txt @@ -0,0 +1 @@ +redis diff --git a/infrastructure/lambda/task_queue_manager/redis/__init__.py b/infrastructure/lambda/task_queue_manager/redis/__init__.py new file mode 100644 index 0000000..5b055bb --- /dev/null +++ b/infrastructure/lambda/task_queue_manager/redis/__init__.py @@ -0,0 +1,59 @@ +from redis.client import Redis, StrictRedis +from redis.connection import ( + BlockingConnectionPool, + ConnectionPool, + Connection, + SSLConnection, + UnixDomainSocketConnection +) +from redis.utils import from_url +from redis.exceptions import ( + AuthenticationError, + AuthenticationWrongNumberOfArgsError, + BusyLoadingError, + ChildDeadlockedError, + ConnectionError, + DataError, + InvalidResponse, + PubSubError, + ReadOnlyError, + RedisError, + ResponseError, + TimeoutError, + WatchError +) + + +def int_or_str(value): + try: + return int(value) + except ValueError: + return value + + +__version__ = '3.5.2' +VERSION = tuple(map(int_or_str, __version__.split('.'))) + +__all__ = [ + 'AuthenticationError', + 'AuthenticationWrongNumberOfArgsError', + 'BlockingConnectionPool', + 
'BusyLoadingError', + 'ChildDeadlockedError', + 'Connection', + 'ConnectionError', + 'ConnectionPool', + 'DataError', + 'from_url', + 'InvalidResponse', + 'PubSubError', + 'ReadOnlyError', + 'Redis', + 'RedisError', + 'ResponseError', + 'SSLConnection', + 'StrictRedis', + 'TimeoutError', + 'UnixDomainSocketConnection', + 'WatchError', +] diff --git a/infrastructure/lambda/task_queue_manager/redis/__pycache__/__init__.cpython-37.pyc b/infrastructure/lambda/task_queue_manager/redis/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..a04ecf8 Binary files /dev/null and b/infrastructure/lambda/task_queue_manager/redis/__pycache__/__init__.cpython-37.pyc differ diff --git a/infrastructure/lambda/task_queue_manager/redis/__pycache__/_compat.cpython-37.pyc b/infrastructure/lambda/task_queue_manager/redis/__pycache__/_compat.cpython-37.pyc new file mode 100644 index 0000000..38d341b Binary files /dev/null and b/infrastructure/lambda/task_queue_manager/redis/__pycache__/_compat.cpython-37.pyc differ diff --git a/infrastructure/lambda/task_queue_manager/redis/__pycache__/client.cpython-37.pyc b/infrastructure/lambda/task_queue_manager/redis/__pycache__/client.cpython-37.pyc new file mode 100644 index 0000000..da0958d Binary files /dev/null and b/infrastructure/lambda/task_queue_manager/redis/__pycache__/client.cpython-37.pyc differ diff --git a/infrastructure/lambda/task_queue_manager/redis/__pycache__/connection.cpython-37.pyc b/infrastructure/lambda/task_queue_manager/redis/__pycache__/connection.cpython-37.pyc new file mode 100644 index 0000000..31283d2 Binary files /dev/null and b/infrastructure/lambda/task_queue_manager/redis/__pycache__/connection.cpython-37.pyc differ diff --git a/infrastructure/lambda/task_queue_manager/redis/__pycache__/exceptions.cpython-37.pyc b/infrastructure/lambda/task_queue_manager/redis/__pycache__/exceptions.cpython-37.pyc new file mode 100644 index 0000000..3a74416 Binary files /dev/null and b/infrastructure/lambda/task_queue_manager/redis/__pycache__/exceptions.cpython-37.pyc differ diff --git a/infrastructure/lambda/task_queue_manager/redis/__pycache__/lock.cpython-37.pyc b/infrastructure/lambda/task_queue_manager/redis/__pycache__/lock.cpython-37.pyc new file mode 100644 index 0000000..964109b Binary files /dev/null and b/infrastructure/lambda/task_queue_manager/redis/__pycache__/lock.cpython-37.pyc differ diff --git a/infrastructure/lambda/task_queue_manager/redis/__pycache__/sentinel.cpython-37.pyc b/infrastructure/lambda/task_queue_manager/redis/__pycache__/sentinel.cpython-37.pyc new file mode 100644 index 0000000..a3674c3 Binary files /dev/null and b/infrastructure/lambda/task_queue_manager/redis/__pycache__/sentinel.cpython-37.pyc differ diff --git a/infrastructure/lambda/task_queue_manager/redis/__pycache__/utils.cpython-37.pyc b/infrastructure/lambda/task_queue_manager/redis/__pycache__/utils.cpython-37.pyc new file mode 100644 index 0000000..af8a1d4 Binary files /dev/null and b/infrastructure/lambda/task_queue_manager/redis/__pycache__/utils.cpython-37.pyc differ diff --git a/infrastructure/lambda/task_queue_manager/redis/_compat.py b/infrastructure/lambda/task_queue_manager/redis/_compat.py new file mode 100644 index 0000000..a0036de --- /dev/null +++ b/infrastructure/lambda/task_queue_manager/redis/_compat.py @@ -0,0 +1,188 @@ +"""Internal module for Python 2 backwards compatibility.""" +# flake8: noqa +import errno +import socket +import sys + + +def sendall(sock, *args, **kwargs): + return sock.sendall(*args, **kwargs) + + +def 
shutdown(sock, *args, **kwargs): + return sock.shutdown(*args, **kwargs) + + +def ssl_wrap_socket(context, sock, *args, **kwargs): + return context.wrap_socket(sock, *args, **kwargs) + + +# For Python older than 3.5, retry EINTR. +if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and + sys.version_info[1] < 5): + # Adapted from https://bugs.python.org/review/23863/patch/14532/54418 + import time + + # Wrapper for handling interruptable system calls. + def _retryable_call(s, func, *args, **kwargs): + # Some modules (SSL) use the _fileobject wrapper directly and + # implement a smaller portion of the socket interface, thus we + # need to let them continue to do so. + timeout, deadline = None, 0.0 + attempted = False + try: + timeout = s.gettimeout() + except AttributeError: + pass + + if timeout: + deadline = time.time() + timeout + + try: + while True: + if attempted and timeout: + now = time.time() + if now >= deadline: + raise socket.error(errno.EWOULDBLOCK, "timed out") + else: + # Overwrite the timeout on the socket object + # to take into account elapsed time. + s.settimeout(deadline - now) + try: + attempted = True + return func(*args, **kwargs) + except socket.error as e: + if e.args[0] == errno.EINTR: + continue + raise + finally: + # Set the existing timeout back for future + # calls. + if timeout: + s.settimeout(timeout) + + def recv(sock, *args, **kwargs): + return _retryable_call(sock, sock.recv, *args, **kwargs) + + def recv_into(sock, *args, **kwargs): + return _retryable_call(sock, sock.recv_into, *args, **kwargs) + +else: # Python 3.5 and above automatically retry EINTR + def recv(sock, *args, **kwargs): + return sock.recv(*args, **kwargs) + + def recv_into(sock, *args, **kwargs): + return sock.recv_into(*args, **kwargs) + +if sys.version_info[0] < 3: + # In Python 3, the ssl module raises socket.timeout whereas it raises + # SSLError in Python 2. For compatibility between versions, ensure + # socket.timeout is raised for both. + import functools + + try: + from ssl import SSLError as _SSLError + except ImportError: + class _SSLError(Exception): + """A replacement in case ssl.SSLError is not available.""" + pass + + _EXPECTED_SSL_TIMEOUT_MESSAGES = ( + "The handshake operation timed out", + "The read operation timed out", + "The write operation timed out", + ) + + def _handle_ssl_timeout(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except _SSLError as e: + message = len(e.args) == 1 and unicode(e.args[0]) or '' + if any(x in message for x in _EXPECTED_SSL_TIMEOUT_MESSAGES): + # Raise socket.timeout for compatibility with Python 3. 
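+                    # (passing e.args through keeps the original SSL
+                    # error's message available on the raised timeout)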
+ raise socket.timeout(*e.args) + raise + return wrapper + + recv = _handle_ssl_timeout(recv) + recv_into = _handle_ssl_timeout(recv_into) + sendall = _handle_ssl_timeout(sendall) + shutdown = _handle_ssl_timeout(shutdown) + ssl_wrap_socket = _handle_ssl_timeout(ssl_wrap_socket) + +if sys.version_info[0] < 3: + from urllib import unquote + from urlparse import parse_qs, urlparse + from itertools import imap, izip + from string import letters as ascii_letters + from Queue import Queue + + # special unicode handling for python2 to avoid UnicodeDecodeError + def safe_unicode(obj, *args): + """ return the unicode representation of obj """ + try: + return unicode(obj, *args) + except UnicodeDecodeError: + # obj is byte string + ascii_text = str(obj).encode('string_escape') + return unicode(ascii_text) + + def iteritems(x): + return x.iteritems() + + def iterkeys(x): + return x.iterkeys() + + def itervalues(x): + return x.itervalues() + + def nativestr(x): + return x if isinstance(x, str) else x.encode('utf-8', 'replace') + + def next(x): + return x.next() + + unichr = unichr + xrange = xrange + basestring = basestring + unicode = unicode + long = long + BlockingIOError = socket.error +else: + from urllib.parse import parse_qs, unquote, urlparse + from string import ascii_letters + from queue import Queue + + def iteritems(x): + return iter(x.items()) + + def iterkeys(x): + return iter(x.keys()) + + def itervalues(x): + return iter(x.values()) + + def nativestr(x): + return x if isinstance(x, str) else x.decode('utf-8', 'replace') + + def safe_unicode(value): + if isinstance(value, bytes): + value = value.decode('utf-8', 'replace') + return str(value) + + next = next + unichr = chr + imap = map + izip = zip + xrange = range + basestring = str + unicode = str + long = int + BlockingIOError = BlockingIOError + +try: # Python 3 + from queue import LifoQueue, Empty, Full +except ImportError: # Python 2 + from Queue import LifoQueue, Empty, Full diff --git a/infrastructure/lambda/task_queue_manager/redis/client.py b/infrastructure/lambda/task_queue_manager/redis/client.py new file mode 100644 index 0000000..6777959 --- /dev/null +++ b/infrastructure/lambda/task_queue_manager/redis/client.py @@ -0,0 +1,4170 @@ +from __future__ import unicode_literals +from itertools import chain +import datetime +import warnings +import time +import threading +import time as mod_time +import re +import hashlib +from redis._compat import (basestring, imap, iteritems, iterkeys, + itervalues, izip, long, nativestr, safe_unicode) +from redis.connection import (ConnectionPool, UnixDomainSocketConnection, + SSLConnection) +from redis.lock import Lock +from redis.exceptions import ( + ConnectionError, + DataError, + ExecAbortError, + NoScriptError, + PubSubError, + RedisError, + ResponseError, + TimeoutError, + WatchError, +) + +SYM_EMPTY = b'' +EMPTY_RESPONSE = 'EMPTY_RESPONSE' + + +def list_or_args(keys, args): + # returns a single new list combining keys and args + try: + iter(keys) + # a string or bytes instance can be iterated, but indicates + # keys wasn't passed as a list + if isinstance(keys, (basestring, bytes)): + keys = [keys] + else: + keys = list(keys) + except TypeError: + keys = [keys] + if args: + keys.extend(args) + return keys + + +def timestamp_to_datetime(response): + "Converts a unix timestamp to a Python datetime object" + if not response: + return None + try: + response = int(response) + except ValueError: + return None + return datetime.datetime.fromtimestamp(response) + + +def 
string_keys_to_dict(key_string, callback): + return dict.fromkeys(key_string.split(), callback) + + +def dict_merge(*dicts): + merged = {} + for d in dicts: + merged.update(d) + return merged + + +class CaseInsensitiveDict(dict): + "Case insensitive dict implementation. Assumes string keys only." + + def __init__(self, data): + for k, v in iteritems(data): + self[k.upper()] = v + + def __contains__(self, k): + return super(CaseInsensitiveDict, self).__contains__(k.upper()) + + def __delitem__(self, k): + super(CaseInsensitiveDict, self).__delitem__(k.upper()) + + def __getitem__(self, k): + return super(CaseInsensitiveDict, self).__getitem__(k.upper()) + + def get(self, k, default=None): + return super(CaseInsensitiveDict, self).get(k.upper(), default) + + def __setitem__(self, k, v): + super(CaseInsensitiveDict, self).__setitem__(k.upper(), v) + + def update(self, data): + data = CaseInsensitiveDict(data) + super(CaseInsensitiveDict, self).update(data) + + +def parse_debug_object(response): + "Parse the results of Redis's DEBUG OBJECT command into a Python dict" + # The 'type' of the object is the first item in the response, but isn't + # prefixed with a name + response = nativestr(response) + response = 'type:' + response + response = dict(kv.split(':') for kv in response.split()) + + # parse some expected int values from the string response + # note: this cmd isn't spec'd so these may not appear in all redis versions + int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle') + for field in int_fields: + if field in response: + response[field] = int(response[field]) + + return response + + +def parse_object(response, infotype): + "Parse the results of an OBJECT command" + if infotype in ('idletime', 'refcount'): + return int_or_none(response) + return response + + +def parse_info(response): + "Parse the result of Redis's INFO command into a Python dict" + info = {} + response = nativestr(response) + + def get_value(value): + if ',' not in value or '=' not in value: + try: + if '.' in value: + return float(value) + else: + return int(value) + except ValueError: + return value + else: + sub_dict = {} + for item in value.split(','): + k, v = item.rsplit('=', 1) + sub_dict[k] = get_value(v) + return sub_dict + + for line in response.splitlines(): + if line and not line.startswith('#'): + if line.find(':') != -1: + # Split, the info fields keys and values. + # Note that the value may contain ':'. 
but the 'host:' + # pseudo-command is the only case where the key contains ':' + key, value = line.split(':', 1) + if key == 'cmdstat_host': + key, value = line.rsplit(':', 1) + info[key] = get_value(value) + else: + # if the line isn't splittable, append it to the "__raw__" key + info.setdefault('__raw__', []).append(line) + + return info + + +def parse_memory_stats(response, **kwargs): + "Parse the results of MEMORY STATS" + stats = pairs_to_dict(response, + decode_keys=True, + decode_string_values=True) + for key, value in iteritems(stats): + if key.startswith('db.'): + stats[key] = pairs_to_dict(value, + decode_keys=True, + decode_string_values=True) + return stats + + +SENTINEL_STATE_TYPES = { + 'can-failover-its-master': int, + 'config-epoch': int, + 'down-after-milliseconds': int, + 'failover-timeout': int, + 'info-refresh': int, + 'last-hello-message': int, + 'last-ok-ping-reply': int, + 'last-ping-reply': int, + 'last-ping-sent': int, + 'master-link-down-time': int, + 'master-port': int, + 'num-other-sentinels': int, + 'num-slaves': int, + 'o-down-time': int, + 'pending-commands': int, + 'parallel-syncs': int, + 'port': int, + 'quorum': int, + 'role-reported-time': int, + 's-down-time': int, + 'slave-priority': int, + 'slave-repl-offset': int, + 'voted-leader-epoch': int +} + + +def parse_sentinel_state(item): + result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES) + flags = set(result['flags'].split(',')) + for name, flag in (('is_master', 'master'), ('is_slave', 'slave'), + ('is_sdown', 's_down'), ('is_odown', 'o_down'), + ('is_sentinel', 'sentinel'), + ('is_disconnected', 'disconnected'), + ('is_master_down', 'master_down')): + result[name] = flag in flags + return result + + +def parse_sentinel_master(response): + return parse_sentinel_state(imap(nativestr, response)) + + +def parse_sentinel_masters(response): + result = {} + for item in response: + state = parse_sentinel_state(imap(nativestr, item)) + result[state['name']] = state + return result + + +def parse_sentinel_slaves_and_sentinels(response): + return [parse_sentinel_state(imap(nativestr, item)) for item in response] + + +def parse_sentinel_get_master(response): + return response and (response[0], int(response[1])) or None + + +def nativestr_if_bytes(value): + return nativestr(value) if isinstance(value, bytes) else value + + +def pairs_to_dict(response, decode_keys=False, decode_string_values=False): + "Create a dict given a list of key/value pairs" + if response is None: + return {} + if decode_keys or decode_string_values: + # the iter form is faster, but I don't know how to make that work + # with a nativestr() map + keys = response[::2] + if decode_keys: + keys = imap(nativestr, keys) + values = response[1::2] + if decode_string_values: + values = imap(nativestr_if_bytes, values) + return dict(izip(keys, values)) + else: + it = iter(response) + return dict(izip(it, it)) + + +def pairs_to_dict_typed(response, type_info): + it = iter(response) + result = {} + for key, value in izip(it, it): + if key in type_info: + try: + value = type_info[key](value) + except Exception: + # if for some reason the value can't be coerced, just use + # the string value + pass + result[key] = value + return result + + +def zset_score_pairs(response, **options): + """ + If ``withscores`` is specified in the options, return the response as + a list of (value, score) pairs + """ + if not response or not options.get('withscores'): + return response + score_cast_func = options.get('score_cast_func', float) + it = iter(response) + 
return list(izip(it, imap(score_cast_func, it))) + + +def sort_return_tuples(response, **options): + """ + If ``groups`` is specified, return the response as a list of + n-element tuples with n being the value found in options['groups'] + """ + if not response or not options.get('groups'): + return response + n = options['groups'] + return list(izip(*[response[i::n] for i in range(n)])) + + +def int_or_none(response): + if response is None: + return None + return int(response) + + +def nativestr_or_none(response): + if response is None: + return None + return nativestr(response) + + +def parse_stream_list(response): + if response is None: + return None + data = [] + for r in response: + if r is not None: + data.append((r[0], pairs_to_dict(r[1]))) + else: + data.append((None, None)) + return data + + +def pairs_to_dict_with_nativestr_keys(response): + return pairs_to_dict(response, decode_keys=True) + + +def parse_list_of_dicts(response): + return list(imap(pairs_to_dict_with_nativestr_keys, response)) + + +def parse_xclaim(response, **options): + if options.get('parse_justid', False): + return response + return parse_stream_list(response) + + +def parse_xinfo_stream(response): + data = pairs_to_dict(response, decode_keys=True) + first = data['first-entry'] + if first is not None: + data['first-entry'] = (first[0], pairs_to_dict(first[1])) + last = data['last-entry'] + if last is not None: + data['last-entry'] = (last[0], pairs_to_dict(last[1])) + return data + + +def parse_xread(response): + if response is None: + return [] + return [[r[0], parse_stream_list(r[1])] for r in response] + + +def parse_xpending(response, **options): + if options.get('parse_detail', False): + return parse_xpending_range(response) + consumers = [{'name': n, 'pending': long(p)} for n, p in response[3] or []] + return { + 'pending': response[0], + 'min': response[1], + 'max': response[2], + 'consumers': consumers + } + + +def parse_xpending_range(response): + k = ('message_id', 'consumer', 'time_since_delivered', 'times_delivered') + return [dict(izip(k, r)) for r in response] + + +def float_or_none(response): + if response is None: + return None + return float(response) + + +def bool_ok(response): + return nativestr(response) == 'OK' + + +def parse_zadd(response, **options): + if response is None: + return None + if options.get('as_score'): + return float(response) + return int(response) + + +def parse_client_list(response, **options): + clients = [] + for c in nativestr(response).splitlines(): + # Values might contain '=' + clients.append(dict(pair.split('=', 1) for pair in c.split(' '))) + return clients + + +def parse_config_get(response, **options): + response = [nativestr(i) if i is not None else None for i in response] + return response and pairs_to_dict(response) or {} + + +def parse_scan(response, **options): + cursor, r = response + return long(cursor), r + + +def parse_hscan(response, **options): + cursor, r = response + return long(cursor), r and pairs_to_dict(r) or {} + + +def parse_zscan(response, **options): + score_cast_func = options.get('score_cast_func', float) + cursor, r = response + it = iter(r) + return long(cursor), list(izip(it, imap(score_cast_func, it))) + + +def parse_slowlog_get(response, **options): + space = ' ' if options.get('decode_responses', False) else b' ' + return [{ + 'id': item[0], + 'start_time': int(item[1]), + 'duration': int(item[2]), + 'command': space.join(item[3]) + } for item in response] + + +def parse_cluster_info(response, **options): + response = 
nativestr(response) + return dict(line.split(':') for line in response.splitlines() if line) + + +def _parse_node_line(line): + line_items = line.split(' ') + node_id, addr, flags, master_id, ping, pong, epoch, \ + connected = line.split(' ')[:8] + slots = [sl.split('-') for sl in line_items[8:]] + node_dict = { + 'node_id': node_id, + 'flags': flags, + 'master_id': master_id, + 'last_ping_sent': ping, + 'last_pong_rcvd': pong, + 'epoch': epoch, + 'slots': slots, + 'connected': True if connected == 'connected' else False + } + return addr, node_dict + + +def parse_cluster_nodes(response, **options): + response = nativestr(response) + raw_lines = response + if isinstance(response, basestring): + raw_lines = response.splitlines() + return dict(_parse_node_line(line) for line in raw_lines) + + +def parse_georadius_generic(response, **options): + if options['store'] or options['store_dist']: + # `store` and `store_dist` can't be combined + # with other command arguments. + return response + + if type(response) != list: + response_list = [response] + else: + response_list = response + + if not options['withdist'] and not options['withcoord']\ + and not options['withhash']: + # just a bunch of places + return response_list + + cast = { + 'withdist': float, + 'withcoord': lambda ll: (float(ll[0]), float(ll[1])), + 'withhash': int + } + + # zip all output results with each casting function to get + # the proper native Python value. + f = [lambda x: x] + f += [cast[o] for o in ['withdist', 'withhash', 'withcoord'] if options[o]] + return [ + list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list + ] + + +def parse_pubsub_numsub(response, **options): + return list(zip(response[0::2], response[1::2])) + + +def parse_client_kill(response, **options): + if isinstance(response, (long, int)): + return int(response) + return nativestr(response) == 'OK' + + +def parse_acl_getuser(response, **options): + if response is None: + return None + data = pairs_to_dict(response, decode_keys=True) + + # convert everything but user-defined data in 'keys' to native strings + data['flags'] = list(map(nativestr, data['flags'])) + data['passwords'] = list(map(nativestr, data['passwords'])) + data['commands'] = nativestr(data['commands']) + + # split 'commands' into separate 'categories' and 'commands' lists + commands, categories = [], [] + for command in data['commands'].split(' '): + if '@' in command: + categories.append(command) + else: + commands.append(command) + + data['commands'] = commands + data['categories'] = categories + data['enabled'] = 'on' in data['flags'] + return data + + +class Redis(object): + """ + Implementation of the Redis protocol. + + This abstract class provides a Python interface to all Redis commands + and an implementation of the Redis protocol.
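(Editor's aside: the parse helpers above all reshape a flat RESP reply into a native Python structure. A minimal sketch of the two most heavily reused ones, `pairs_to_dict` and `zset_score_pairs`, fed hand-made reply lists; the byte strings below stand in for an undecoded server reply and are illustrative only.)

```python
# Illustration only: hand-made reply lists in place of real server output.
raw = [b'name', b'Andy', b'age', b'21']
pairs_to_dict(raw)                       # {b'name': b'Andy', b'age': b'21'}
pairs_to_dict(raw, decode_keys=True)     # {'name': b'Andy', 'age': b'21'}

zset_reply = [b'a', b'1.5', b'b', b'2']
zset_score_pairs(zset_reply, withscores=True)   # [(b'a', 1.5), (b'b', 2.0)]
```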
+ + Connection and Pipeline derive from this, implementing how + the commands are sent and received to the Redis server + """ + RESPONSE_CALLBACKS = dict_merge( + string_keys_to_dict( + 'AUTH EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST ' + 'PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX', + bool + ), + string_keys_to_dict( + 'BITCOUNT BITPOS DECRBY DEL EXISTS GEOADD GETBIT HDEL HLEN ' + 'HSTRLEN INCRBY LINSERT LLEN LPUSHX PFADD PFCOUNT RPUSHX SADD ' + 'SCARD SDIFFSTORE SETBIT SETRANGE SINTERSTORE SREM STRLEN ' + 'SUNIONSTORE UNLINK XACK XDEL XLEN XTRIM ZCARD ZLEXCOUNT ZREM ' + 'ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE', + int + ), + string_keys_to_dict( + 'INCRBYFLOAT HINCRBYFLOAT', + float + ), + string_keys_to_dict( + # these return OK, or int if redis-server is >=1.3.4 + 'LPUSH RPUSH', + lambda r: isinstance(r, (long, int)) and r or nativestr(r) == 'OK' + ), + string_keys_to_dict('SORT', sort_return_tuples), + string_keys_to_dict('ZSCORE ZINCRBY GEODIST', float_or_none), + string_keys_to_dict( + 'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE ' + 'RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH ', + bool_ok + ), + string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None), + string_keys_to_dict( + 'SDIFF SINTER SMEMBERS SUNION', + lambda r: r and set(r) or set() + ), + string_keys_to_dict( + 'ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE', + zset_score_pairs + ), + string_keys_to_dict('BZPOPMIN BZPOPMAX', \ + lambda r: r and (r[0], r[1], float(r[2])) or None), + string_keys_to_dict('ZRANK ZREVRANK', int_or_none), + string_keys_to_dict('XREVRANGE XRANGE', parse_stream_list), + string_keys_to_dict('XREAD XREADGROUP', parse_xread), + string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True), + { + 'ACL CAT': lambda r: list(map(nativestr, r)), + 'ACL DELUSER': int, + 'ACL GENPASS': nativestr, + 'ACL GETUSER': parse_acl_getuser, + 'ACL LIST': lambda r: list(map(nativestr, r)), + 'ACL LOAD': bool_ok, + 'ACL SAVE': bool_ok, + 'ACL SETUSER': bool_ok, + 'ACL USERS': lambda r: list(map(nativestr, r)), + 'ACL WHOAMI': nativestr, + 'CLIENT GETNAME': lambda r: r and nativestr(r), + 'CLIENT ID': int, + 'CLIENT KILL': parse_client_kill, + 'CLIENT LIST': parse_client_list, + 'CLIENT SETNAME': bool_ok, + 'CLIENT UNBLOCK': lambda r: r and int(r) == 1 or False, + 'CLIENT PAUSE': bool_ok, + 'CLUSTER ADDSLOTS': bool_ok, + 'CLUSTER COUNT-FAILURE-REPORTS': lambda x: int(x), + 'CLUSTER COUNTKEYSINSLOT': lambda x: int(x), + 'CLUSTER DELSLOTS': bool_ok, + 'CLUSTER FAILOVER': bool_ok, + 'CLUSTER FORGET': bool_ok, + 'CLUSTER INFO': parse_cluster_info, + 'CLUSTER KEYSLOT': lambda x: int(x), + 'CLUSTER MEET': bool_ok, + 'CLUSTER NODES': parse_cluster_nodes, + 'CLUSTER REPLICATE': bool_ok, + 'CLUSTER RESET': bool_ok, + 'CLUSTER SAVECONFIG': bool_ok, + 'CLUSTER SET-CONFIG-EPOCH': bool_ok, + 'CLUSTER SETSLOT': bool_ok, + 'CLUSTER SLAVES': parse_cluster_nodes, + 'CONFIG GET': parse_config_get, + 'CONFIG RESETSTAT': bool_ok, + 'CONFIG SET': bool_ok, + 'DEBUG OBJECT': parse_debug_object, + 'GEOHASH': lambda r: list(map(nativestr_or_none, r)), + 'GEOPOS': lambda r: list(map(lambda ll: (float(ll[0]), + float(ll[1])) + if ll is not None else None, r)), + 'GEORADIUS': parse_georadius_generic, + 'GEORADIUSBYMEMBER': parse_georadius_generic, + 'HGETALL': lambda r: r and pairs_to_dict(r) or {}, + 'HSCAN': parse_hscan, + 'INFO': parse_info, + 'LASTSAVE': timestamp_to_datetime, + 'MEMORY PURGE': bool_ok, + 'MEMORY STATS': parse_memory_stats, + 'MEMORY USAGE': 
int_or_none, + 'OBJECT': parse_object, + 'PING': lambda r: nativestr(r) == 'PONG', + 'PUBSUB NUMSUB': parse_pubsub_numsub, + 'RANDOMKEY': lambda r: r and r or None, + 'SCAN': parse_scan, + 'SCRIPT EXISTS': lambda r: list(imap(bool, r)), + 'SCRIPT FLUSH': bool_ok, + 'SCRIPT KILL': bool_ok, + 'SCRIPT LOAD': nativestr, + 'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master, + 'SENTINEL MASTER': parse_sentinel_master, + 'SENTINEL MASTERS': parse_sentinel_masters, + 'SENTINEL MONITOR': bool_ok, + 'SENTINEL REMOVE': bool_ok, + 'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels, + 'SENTINEL SET': bool_ok, + 'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels, + 'SET': lambda r: r and nativestr(r) == 'OK', + 'SLOWLOG GET': parse_slowlog_get, + 'SLOWLOG LEN': int, + 'SLOWLOG RESET': bool_ok, + 'SSCAN': parse_scan, + 'TIME': lambda x: (int(x[0]), int(x[1])), + 'XCLAIM': parse_xclaim, + 'XGROUP CREATE': bool_ok, + 'XGROUP DELCONSUMER': int, + 'XGROUP DESTROY': bool, + 'XGROUP SETID': bool_ok, + 'XINFO CONSUMERS': parse_list_of_dicts, + 'XINFO GROUPS': parse_list_of_dicts, + 'XINFO STREAM': parse_xinfo_stream, + 'XPENDING': parse_xpending, + 'ZADD': parse_zadd, + 'ZSCAN': parse_zscan, + } + ) + + @classmethod + def from_url(cls, url, db=None, **kwargs): + """ + Return a Redis client object configured from the given URL + + For example:: + + redis://[[username]:[password]]@localhost:6379/0 + rediss://[[username]:[password]]@localhost:6379/0 + unix://[[username]:[password]]@/path/to/socket.sock?db=0 + + Three URL schemes are supported: + + - ```redis://`` + `_ creates a + normal TCP socket connection + - ```rediss://`` + `_ creates a + SSL wrapped TCP socket connection + - ``unix://`` creates a Unix Domain Socket connection + + There are several ways to specify a database number. The parse function + will return the first specified option: + 1. A ``db`` querystring option, e.g. redis://localhost?db=0 + 2. If using the redis:// scheme, the path argument of the url, e.g. + redis://localhost/0 + 3. The ``db`` argument to this function. + + If none of these options are specified, db=0 is used. + + Any additional querystring arguments and keyword arguments will be + passed along to the ConnectionPool class's initializer. In the case + of conflicting arguments, querystring arguments always win. + """ + connection_pool = ConnectionPool.from_url(url, db=db, **kwargs) + return cls(connection_pool=connection_pool) + + def __init__(self, host='localhost', port=6379, + db=0, password=None, socket_timeout=None, + socket_connect_timeout=None, + socket_keepalive=None, socket_keepalive_options=None, + connection_pool=None, unix_socket_path=None, + encoding='utf-8', encoding_errors='strict', + charset=None, errors=None, + decode_responses=False, retry_on_timeout=False, + ssl=False, ssl_keyfile=None, ssl_certfile=None, + ssl_cert_reqs='required', ssl_ca_certs=None, + ssl_check_hostname=False, + max_connections=None, single_connection_client=False, + health_check_interval=0, client_name=None, username=None): + if not connection_pool: + if charset is not None: + warnings.warn(DeprecationWarning( + '"charset" is deprecated. Use "encoding" instead')) + encoding = charset + if errors is not None: + warnings.warn(DeprecationWarning( + '"errors" is deprecated. 
Use "encoding_errors" instead')) + encoding_errors = errors + + kwargs = { + 'db': db, + 'username': username, + 'password': password, + 'socket_timeout': socket_timeout, + 'encoding': encoding, + 'encoding_errors': encoding_errors, + 'decode_responses': decode_responses, + 'retry_on_timeout': retry_on_timeout, + 'max_connections': max_connections, + 'health_check_interval': health_check_interval, + 'client_name': client_name + } + # based on input, setup appropriate connection args + if unix_socket_path is not None: + kwargs.update({ + 'path': unix_socket_path, + 'connection_class': UnixDomainSocketConnection + }) + else: + # TCP specific options + kwargs.update({ + 'host': host, + 'port': port, + 'socket_connect_timeout': socket_connect_timeout, + 'socket_keepalive': socket_keepalive, + 'socket_keepalive_options': socket_keepalive_options, + }) + + if ssl: + kwargs.update({ + 'connection_class': SSLConnection, + 'ssl_keyfile': ssl_keyfile, + 'ssl_certfile': ssl_certfile, + 'ssl_cert_reqs': ssl_cert_reqs, + 'ssl_ca_certs': ssl_ca_certs, + 'ssl_check_hostname': ssl_check_hostname, + }) + connection_pool = ConnectionPool(**kwargs) + self.connection_pool = connection_pool + self.connection = None + if single_connection_client: + self.connection = self.connection_pool.get_connection('_') + + self.response_callbacks = CaseInsensitiveDict( + self.__class__.RESPONSE_CALLBACKS) + + def __repr__(self): + return "%s<%s>" % (type(self).__name__, repr(self.connection_pool)) + + def set_response_callback(self, command, callback): + "Set a custom Response Callback" + self.response_callbacks[command] = callback + + def pipeline(self, transaction=True, shard_hint=None): + """ + Return a new pipeline object that can queue multiple commands for + later execution. ``transaction`` indicates whether all commands + should be executed atomically. Apart from making a group of operations + atomic, pipelines are useful for reducing the back-and-forth overhead + between the client and server. + """ + return Pipeline( + self.connection_pool, + self.response_callbacks, + transaction, + shard_hint) + + def transaction(self, func, *watches, **kwargs): + """ + Convenience method for executing the callable `func` as a transaction + while watching all keys specified in `watches`. The 'func' callable + should expect a single argument which is a Pipeline object. + """ + shard_hint = kwargs.pop('shard_hint', None) + value_from_callable = kwargs.pop('value_from_callable', False) + watch_delay = kwargs.pop('watch_delay', None) + with self.pipeline(True, shard_hint) as pipe: + while True: + try: + if watches: + pipe.watch(*watches) + func_value = func(pipe) + exec_value = pipe.execute() + return func_value if value_from_callable else exec_value + except WatchError: + if watch_delay is not None and watch_delay > 0: + time.sleep(watch_delay) + continue + + def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None, + lock_class=None, thread_local=True): + """ + Return a new Lock object using key ``name`` that mimics + the behavior of threading.Lock. + + If specified, ``timeout`` indicates a maximum life for the lock. + By default, it will remain locked until release() is called. + + ``sleep`` indicates the amount of time to sleep per loop iteration + when the lock is in blocking mode and another client is currently + holding the lock. + + ``blocking_timeout`` indicates the maximum amount of time in seconds to + spend trying to acquire the lock. A value of ``None`` indicates + continue trying forever. 
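(Editor's aside: the `transaction()` helper above wraps the WATCH/MULTI/EXEC retry loop. A minimal sketch of how it is typically driven, assuming `conn` is a connected `Redis` client; the key name and counter logic are illustrative.)

```python
# Hypothetical caller of Redis.transaction(): read-modify-write a counter
# atomically. The function receives a Pipeline that is WATCHing 'visits'.
def incr_visits(pipe):
    current = int(pipe.get('visits') or 0)  # immediate-mode read while watching
    pipe.multi()                            # switch the pipeline to buffered mode
    pipe.set('visits', current + 1)

conn.transaction(incr_visits, 'visits')     # retried for us on WatchError
```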
``blocking_timeout`` can be specified as a + float or integer, both representing the number of seconds to wait. + + ``lock_class`` forces the specified lock implementation. + + ``thread_local`` indicates whether the lock token is placed in + thread-local storage. By default, the token is placed in thread local + storage so that a thread only sees its token, not a token set by + another thread. Consider the following timeline: + + time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds. + thread-1 sets the token to "abc" + time: 1, thread-2 blocks trying to acquire `my-lock` using the + Lock instance. + time: 5, thread-1 has not yet completed. redis expires the lock + key. + time: 5, thread-2 acquired `my-lock` now that it's available. + thread-2 sets the token to "xyz" + time: 6, thread-1 finishes its work and calls release(). if the + token is *not* stored in thread local storage, then + thread-1 would see the token value as "xyz" and would be + able to successfully release the thread-2's lock. + + In some use cases it's necessary to disable thread local storage. For + example, if you have code where one thread acquires a lock and passes + that lock instance to a worker thread to release later. If thread + local storage isn't disabled in this case, the worker thread won't see + the token set by the thread that acquired the lock. Our assumption + is that these cases aren't common and as such default to using + thread local storage. """ + if lock_class is None: + lock_class = Lock + return lock_class(self, name, timeout=timeout, sleep=sleep, + blocking_timeout=blocking_timeout, + thread_local=thread_local) + + def pubsub(self, **kwargs): + """ + Return a Publish/Subscribe object. With this object, you can + subscribe to channels and listen for messages that get published to + them. + """ + return PubSub(self.connection_pool, **kwargs) + + def monitor(self): + return Monitor(self.connection_pool) + + def client(self): + return self.__class__(connection_pool=self.connection_pool, + single_connection_client=True) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def __del__(self): + self.close() + + def close(self): + conn = self.connection + if conn: + self.connection = None + self.connection_pool.release(conn) + + # COMMAND EXECUTION AND PROTOCOL PARSING + def execute_command(self, *args, **options): + "Execute a command and return a parsed response" + pool = self.connection_pool + command_name = args[0] + conn = self.connection or pool.get_connection(command_name, **options) + try: + conn.send_command(*args) + return self.parse_response(conn, command_name, **options) + except (ConnectionError, TimeoutError) as e: + conn.disconnect() + if not (conn.retry_on_timeout and isinstance(e, TimeoutError)): + raise + conn.send_command(*args) + return self.parse_response(conn, command_name, **options) + finally: + if not self.connection: + pool.release(conn) + + def parse_response(self, connection, command_name, **options): + "Parses a response from the Redis server" + try: + response = connection.read_response() + except ResponseError: + if EMPTY_RESPONSE in options: + return options[EMPTY_RESPONSE] + raise + if command_name in self.response_callbacks: + return self.response_callbacks[command_name](response, **options) + return response + + # SERVER INFORMATION + + # ACL methods + def acl_cat(self, category=None): + """ + Returns a list of categories or commands within a category. 
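(Editor's aside: a hedged usage sketch for the `lock()` API documented above; `conn`, the lock name, and `do_exclusive_work` are illustrative, not part of the library.)

```python
# Lock objects support the context-manager protocol: acquire on enter,
# release on exit. Raises redis.exceptions.LockError if the lock cannot
# be acquired within blocking_timeout.
with conn.lock('resource-lock', timeout=10, blocking_timeout=3):
    do_exclusive_work()   # hypothetical critical section
```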
+ + If ``category`` is not supplied, returns a list of all categories. + If ``category`` is supplied, returns a list of all commands within + that category. + """ + pieces = [category] if category else [] + return self.execute_command('ACL CAT', *pieces) + + def acl_deluser(self, username): + "Delete the ACL for the specified ``username``" + return self.execute_command('ACL DELUSER', username) + + def acl_genpass(self): + "Generate a random password value" + return self.execute_command('ACL GENPASS') + + def acl_getuser(self, username): + """ + Get the ACL details for the specified ``username``. + + If ``username`` does not exist, return None + """ + return self.execute_command('ACL GETUSER', username) + + def acl_list(self): + "Return a list of all ACLs on the server" + return self.execute_command('ACL LIST') + + def acl_load(self): + """ + Load ACL rules from the configured ``aclfile``. + + Note that the server must be configured with the ``aclfile`` + directive to be able to load ACL rules from an aclfile. + """ + return self.execute_command('ACL LOAD') + + def acl_save(self): + """ + Save ACL rules to the configured ``aclfile``. + + Note that the server must be configured with the ``aclfile`` + directive to be able to save ACL rules to an aclfile. + """ + return self.execute_command('ACL SAVE') + + def acl_setuser(self, username, enabled=False, nopass=False, + passwords=None, hashed_passwords=None, categories=None, + commands=None, keys=None, reset=False, reset_keys=False, + reset_passwords=False): + """ + Create or update an ACL user. + + Create or update the ACL for ``username``. If the user already exists, + the existing ACL is completely overwritten and replaced with the + specified values. + + ``enabled`` is a boolean indicating whether the user should be allowed + to authenticate or not. Defaults to ``False``. + + ``nopass`` is a boolean indicating whether the user can authenticate + without a password. This cannot be True if ``passwords`` are also + specified. + + ``passwords`` if specified is a list of plain text passwords + to add to or remove from the user. Each password must be prefixed with + a '+' to add or a '-' to remove. For convenience, the value of + ``passwords`` can be a simple prefixed string when adding or + removing a single password. + + ``hashed_passwords`` if specified is a list of SHA-256 hashed passwords + to add to or remove from the user. Each hashed password must be + prefixed with a '+' to add or a '-' to remove. For convenience, + the value of ``hashed_passwords`` can be a simple prefixed string when + adding or removing a single password. + + ``categories`` if specified is a list of strings representing category + permissions. Each string must be prefixed with either a '+' to add the + category permission or a '-' to remove the category permission. + + ``commands`` if specified is a list of strings representing command + permissions. Each string must be prefixed with either a '+' to add the + command permission or a '-' to remove the command permission. + + ``keys`` if specified is a list of key patterns to grant the user + access to. Key patterns allow '*' to support wildcard matching. For + example, '*' grants access to all keys while 'cache:*' grants access + to all keys that are prefixed with 'cache:'. ``keys`` should not be + prefixed with a '~'. + + ``reset`` is a boolean indicating whether the user should be fully + reset prior to applying the new ACL.
Setting this to True will + remove all existing passwords, flags and privileges from the user and + then apply the specified rules. If this is False, the user's existing + passwords, flags and privileges will be kept and any new specified + rules will be applied on top. + + ``reset_keys`` is a boolean indicating whether the user's key + permissions should be reset prior to applying any new key permissions + specified in ``keys``. If this is False, the user's existing + key permissions will be kept and any new specified key permissions + will be applied on top. + + ``reset_passwords`` is a boolean indicating whether to remove all + existing passwords and the 'nopass' flag from the user prior to + applying any new passwords specified in 'passwords' or + 'hashed_passwords'. If this is False, the user's existing passwords + and 'nopass' status will be kept and any new specified passwords + or hashed_passwords will be applied on top. + """ + encoder = self.connection_pool.get_encoder() + pieces = [username] + + if reset: + pieces.append(b'reset') + + if reset_keys: + pieces.append(b'resetkeys') + + if reset_passwords: + pieces.append(b'resetpass') + + if enabled: + pieces.append(b'on') + else: + pieces.append(b'off') + + if (passwords or hashed_passwords) and nopass: + raise DataError('Cannot set \'nopass\' and supply ' + '\'passwords\' or \'hashed_passwords\'') + + if passwords: + # as most users will have only one password, allow passwords + # to be specified as a simple string or a list + passwords = list_or_args(passwords, []) + for i, password in enumerate(passwords): + password = encoder.encode(password) + if password.startswith(b'+'): + pieces.append(b'>%s' % password[1:]) + elif password.startswith(b'-'): + pieces.append(b'<%s' % password[1:]) + else: + raise DataError('Password %d must be prefixed with a ' + '"+" to add or a "-" to remove' % i) + + if hashed_passwords: + # as most users will have only one password, allow hashed_passwords + # to be specified as a simple string or a list + hashed_passwords = list_or_args(hashed_passwords, []) + for i, hashed_password in enumerate(hashed_passwords): + hashed_password = encoder.encode(hashed_password) + if hashed_password.startswith(b'+'): + pieces.append(b'#%s' % hashed_password[1:]) + elif hashed_password.startswith(b'-'): + pieces.append(b'!%s' % hashed_password[1:]) + else: + raise DataError('Hashed password %d must be prefixed ' + 'with a "+" to add or a "-" to remove' % i) + + if nopass: + pieces.append(b'nopass') + + if categories: + for category in categories: + category = encoder.encode(category) + # categories can be prefixed with one of (+@, +, -@, -) + if category.startswith(b'+@'): + pieces.append(category) + elif category.startswith(b'+'): + pieces.append(b'+@%s' % category[1:]) + elif category.startswith(b'-@'): + pieces.append(category) + elif category.startswith(b'-'): + pieces.append(b'-@%s' % category[1:]) + else: + raise DataError('Category "%s" must be prefixed with ' + '"+" or "-"' + % encoder.decode(category, force=True)) + if commands: + for cmd in commands: + cmd = encoder.encode(cmd) + if not cmd.startswith(b'+') and not cmd.startswith(b'-'): + raise DataError('Command "%s" must be prefixed with ' + '"+" or "-"' + % encoder.decode(cmd, force=True)) + pieces.append(cmd) + + if keys: + for key in keys: + key = encoder.encode(key) + pieces.append(b'~%s' % key) + + return self.execute_command('ACL SETUSER', *pieces) + + def acl_users(self): + "Returns a list of all registered users on the server."
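(Editor's aside: pulling the pieces of `acl_setuser()` together, a sketch of a typical call; the user name, password, and key pattern are made up for illustration, and `conn` is assumed to be a connected client.)

```python
# Grant an illustrative 'cache-user' GET/SET access to cache:* keys only.
conn.acl_setuser(
    'cache-user',
    enabled=True,
    passwords=['+s3cret'],      # '+' adds a password, '-' would remove one
    categories=['-@all'],       # drop all category permissions first
    commands=['+get', '+set'],  # then whitelist individual commands
    keys=['cache:*'],           # sent to the server as ~cache:*
)
```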
+ return self.execute_command('ACL USERS') + + def acl_whoami(self): + "Get the username for the current connection" + return self.execute_command('ACL WHOAMI') + + def bgrewriteaof(self): + "Tell the Redis server to rewrite the AOF file from data in memory." + return self.execute_command('BGREWRITEAOF') + + def bgsave(self): + """ + Tell the Redis server to save its data to disk. Unlike save(), + this method is asynchronous and returns immediately. + """ + return self.execute_command('BGSAVE') + + def client_kill(self, address): + "Disconnects the client at ``address`` (ip:port)" + return self.execute_command('CLIENT KILL', address) + + def client_kill_filter(self, _id=None, _type=None, addr=None, skipme=None): + """ + Disconnects client(s) using a variety of filter options + :param id: Kills a client by its unique ID field + :param type: Kills a client by type where type is one of 'normal', + 'master', 'slave' or 'pubsub' + :param addr: Kills a client by its 'address:port' + :param skipme: If True, then the client calling the command + will not get killed even if it is identified by one of the filter + options. If skipme is not provided, the server defaults to skipme=True + """ + args = [] + if _type is not None: + client_types = ('normal', 'master', 'slave', 'pubsub') + if str(_type).lower() not in client_types: + raise DataError("CLIENT KILL type must be one of %r" % ( + client_types,)) + args.extend((b'TYPE', _type)) + if skipme is not None: + if not isinstance(skipme, bool): + raise DataError("CLIENT KILL skipme must be a bool") + if skipme: + args.extend((b'SKIPME', b'YES')) + else: + args.extend((b'SKIPME', b'NO')) + if _id is not None: + args.extend((b'ID', _id)) + if addr is not None: + args.extend((b'ADDR', addr)) + if not args: + raise DataError("CLIENT KILL ... ... " + " must specify at least one filter") + return self.execute_command('CLIENT KILL', *args) + + def client_list(self, _type=None): + """ + Returns a list of currently connected clients. + If type of client specified, only that type will be returned. + :param _type: optional. one of the client types (normal, master, + replica, pubsub) + """ + "Returns a list of currently connected clients" + if _type is not None: + client_types = ('normal', 'master', 'replica', 'pubsub') + if str(_type).lower() not in client_types: + raise DataError("CLIENT LIST _type must be one of %r" % ( + client_types,)) + return self.execute_command('CLIENT LIST', b'TYPE', _type) + return self.execute_command('CLIENT LIST') + + def client_getname(self): + "Returns the current connection name" + return self.execute_command('CLIENT GETNAME') + + def client_id(self): + "Returns the current connection id" + return self.execute_command('CLIENT ID') + + def client_setname(self, name): + "Sets the current connection name" + return self.execute_command('CLIENT SETNAME', name) + + def client_unblock(self, client_id, error=False): + """ + Unblocks a connection by its client id. + If ``error`` is True, unblocks the client with a special error message. + If ``error`` is False (default), the client is unblocked using the + regular timeout mechanism. 
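(Editor's aside: a short sketch of the filtered form of CLIENT KILL provided by `client_kill_filter()` above; the address and client id are illustrative.)

```python
# Kill every other 'normal' client but spare the calling connection.
conn.client_kill_filter(_type='normal', skipme=True)
# Or target a single connection by address or by client id.
conn.client_kill_filter(addr='10.0.0.5:49152')
conn.client_kill_filter(_id=4242)
```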
+ """ + args = ['CLIENT UNBLOCK', int(client_id)] + if error: + args.append(b'ERROR') + return self.execute_command(*args) + + def client_pause(self, timeout): + """ + Suspend all the Redis clients for the specified amount of time + :param timeout: milliseconds to pause clients + """ + if not isinstance(timeout, (int, long)): + raise DataError("CLIENT PAUSE timeout must be an integer") + return self.execute_command('CLIENT PAUSE', str(timeout)) + + def readwrite(self): + "Disables read queries for a connection to a Redis Cluster slave node" + return self.execute_command('READWRITE') + + def readonly(self): + "Enables read queries for a connection to a Redis Cluster replica node" + return self.execute_command('READONLY') + + def config_get(self, pattern="*"): + "Return a dictionary of configuration based on the ``pattern``" + return self.execute_command('CONFIG GET', pattern) + + def config_set(self, name, value): + "Set config item ``name`` with ``value``" + return self.execute_command('CONFIG SET', name, value) + + def config_resetstat(self): + "Reset runtime statistics" + return self.execute_command('CONFIG RESETSTAT') + + def config_rewrite(self): + "Rewrite config file with the minimal change to reflect running config" + return self.execute_command('CONFIG REWRITE') + + def dbsize(self): + "Returns the number of keys in the current database" + return self.execute_command('DBSIZE') + + def debug_object(self, key): + "Returns version specific meta information about a given key" + return self.execute_command('DEBUG OBJECT', key) + + def echo(self, value): + "Echo the string back from the server" + return self.execute_command('ECHO', value) + + def flushall(self, asynchronous=False): + """ + Delete all keys in all databases on the current host. + + ``asynchronous`` indicates whether the operation is + executed asynchronously by the server. + """ + args = [] + if asynchronous: + args.append(b'ASYNC') + return self.execute_command('FLUSHALL', *args) + + def flushdb(self, asynchronous=False): + """ + Delete all keys in the current database. + + ``asynchronous`` indicates whether the operation is + executed asynchronously by the server. + """ + args = [] + if asynchronous: + args.append(b'ASYNC') + return self.execute_command('FLUSHDB', *args) + + def swapdb(self, first, second): + "Swap two databases" + return self.execute_command('SWAPDB', first, second) + + def info(self, section=None): + """ + Returns a dictionary containing information about the Redis server + + The ``section`` option can be used to select a specific section + of information + + The section option is not supported by older versions of Redis Server, + and will generate ResponseError + """ + if section is None: + return self.execute_command('INFO') + else: + return self.execute_command('INFO', section) + + def lastsave(self): + """ + Return a Python datetime object representing the last time the + Redis database was saved to disk + """ + return self.execute_command('LASTSAVE') + + def migrate(self, host, port, keys, destination_db, timeout, + copy=False, replace=False, auth=None): + """ + Migrate 1 or more keys from the current Redis server to a different + server specified by the ``host``, ``port`` and ``destination_db``. + + The ``timeout``, specified in milliseconds, indicates the maximum + time the connection between the two servers can be idle before the + command is interrupted. + + If ``copy`` is True, the specified ``keys`` are NOT deleted from + the source server. 
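(Editor's aside: the server-introspection commands above compose naturally; a brief illustrative sequence, assuming `conn` is a connected client.)

```python
maxmem = conn.config_get('maxmemory*')   # dict built by parse_config_get()
mem = conn.info(section='memory')        # dict built by parse_info()
conn.config_set('maxmemory-policy', 'allkeys-lru')
conn.client_pause(500)                   # suspend all clients for 500 ms
```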
+ + If ``replace`` is True, this operation will overwrite the keys + on the destination server if they exist. + + If ``auth`` is specified, authenticate to the destination server with + the password provided. + """ + keys = list_or_args(keys, []) + if not keys: + raise DataError('MIGRATE requires at least one key') + pieces = [] + if copy: + pieces.append(b'COPY') + if replace: + pieces.append(b'REPLACE') + if auth: + pieces.append(b'AUTH') + pieces.append(auth) + pieces.append(b'KEYS') + pieces.extend(keys) + return self.execute_command('MIGRATE', host, port, '', destination_db, + timeout, *pieces) + + def object(self, infotype, key): + "Return the encoding, idletime, or refcount about the key" + return self.execute_command('OBJECT', infotype, key, infotype=infotype) + + def memory_stats(self): + "Return a dictionary of memory stats" + return self.execute_command('MEMORY STATS') + + def memory_usage(self, key, samples=None): + """ + Return the total memory usage for key, its value and associated + administrative overheads. + + For nested data structures, ``samples`` is the number of elements to + sample. If left unspecified, the server's default is 5. Use 0 to sample + all elements. + """ + args = [] + if isinstance(samples, int): + args.extend([b'SAMPLES', samples]) + return self.execute_command('MEMORY USAGE', key, *args) + + def memory_purge(self): + "Attempts to purge dirty pages for reclamation by allocator" + return self.execute_command('MEMORY PURGE') + + def ping(self): + "Ping the Redis server" + return self.execute_command('PING') + + def save(self): + """ + Tell the Redis server to save its data to disk, + blocking until the save is complete + """ + return self.execute_command('SAVE') + + def sentinel(self, *args): + "Redis Sentinel's SENTINEL command." + warnings.warn( + DeprecationWarning('Use the individual sentinel_* methods')) + + def sentinel_get_master_addr_by_name(self, service_name): + "Returns a (host, port) pair for the given ``service_name``" + return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME', + service_name) + + def sentinel_master(self, service_name): + "Returns a dictionary containing the specified masters state." + return self.execute_command('SENTINEL MASTER', service_name) + + def sentinel_masters(self): + "Returns a list of dictionaries containing each master's state." + return self.execute_command('SENTINEL MASTERS') + + def sentinel_monitor(self, name, ip, port, quorum): + "Add a new master to Sentinel to be monitored" + return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum) + + def sentinel_remove(self, name): + "Remove a master from Sentinel's monitoring" + return self.execute_command('SENTINEL REMOVE', name) + + def sentinel_sentinels(self, service_name): + "Returns a list of sentinels for ``service_name``" + return self.execute_command('SENTINEL SENTINELS', service_name) + + def sentinel_set(self, name, option, value): + "Set Sentinel monitoring parameters for a given master" + return self.execute_command('SENTINEL SET', name, option, value) + + def sentinel_slaves(self, service_name): + "Returns a list of slaves for ``service_name``" + return self.execute_command('SENTINEL SLAVES', service_name) + + def shutdown(self, save=False, nosave=False): + """Shutdown the Redis server. If Redis has persistence configured, + data will be flushed before shutdown. If the "save" option is set, + a data flush will be attempted even if there is no persistence + configured. If the "nosave" option is set, no data flush will be + attempted. 
The "save" and "nosave" options cannot both be set. + """ + if save and nosave: + raise DataError('SHUTDOWN save and nosave cannot both be set') + args = ['SHUTDOWN'] + if save: + args.append('SAVE') + if nosave: + args.append('NOSAVE') + try: + self.execute_command(*args) + except ConnectionError: + # a ConnectionError here is expected + return + raise RedisError("SHUTDOWN seems to have failed.") + + def slaveof(self, host=None, port=None): + """ + Set the server to be a replicated slave of the instance identified + by the ``host`` and ``port``. If called without arguments, the + instance is promoted to a master instead. + """ + if host is None and port is None: + return self.execute_command('SLAVEOF', b'NO', b'ONE') + return self.execute_command('SLAVEOF', host, port) + + def slowlog_get(self, num=None): + """ + Get the entries from the slowlog. If ``num`` is specified, get the + most recent ``num`` items. + """ + args = ['SLOWLOG GET'] + if num is not None: + args.append(num) + decode_responses = self.connection_pool.connection_kwargs.get( + 'decode_responses', False) + return self.execute_command(*args, decode_responses=decode_responses) + + def slowlog_len(self): + "Get the number of items in the slowlog" + return self.execute_command('SLOWLOG LEN') + + def slowlog_reset(self): + "Remove all items in the slowlog" + return self.execute_command('SLOWLOG RESET') + + def time(self): + """ + Returns the server time as a 2-item tuple of ints: + (seconds since epoch, microseconds into this second). + """ + return self.execute_command('TIME') + + def wait(self, num_replicas, timeout): + """ + Redis synchronous replication. + Returns the number of replicas that processed the query when + we finally have at least ``num_replicas``, or when the ``timeout`` is + reached. + """ + return self.execute_command('WAIT', num_replicas, timeout) + + # BASIC KEY COMMANDS + def append(self, key, value): + """ + Appends the string ``value`` to the value at ``key``. If ``key`` + doesn't already exist, create it with a value of ``value``. + Returns the new length of the value at ``key``. + """ + return self.execute_command('APPEND', key, value) + + def bitcount(self, key, start=None, end=None): + """ + Returns the count of set bits in the value of ``key``. Optional + ``start`` and ``end`` parameters indicate which bytes to consider + """ + params = [key] + if start is not None and end is not None: + params.append(start) + params.append(end) + elif (start is not None and end is None) or \ + (end is not None and start is None): + raise DataError("Both start and end must be specified") + return self.execute_command('BITCOUNT', *params) + + def bitfield(self, key, default_overflow=None): + """ + Return a BitFieldOperation instance to conveniently construct one or + more bitfield operations on ``key``. + """ + return BitFieldOperation(self, key, default_overflow=default_overflow) + + def bitop(self, operation, dest, *keys): + """ + Perform a bitwise operation using ``operation`` between ``keys`` and + store the result in ``dest``. + """ + return self.execute_command('BITOP', operation, dest, *keys) + + def bitpos(self, key, bit, start=None, end=None): + """ + Return the position of the first bit set to 1 or 0 in a string. + ``start`` and ``end`` define the search range. The range is interpreted + as a range of bytes and not a range of bits, so start=0 and end=2 + means to look at the first three bytes.
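(Editor's aside: a compact sketch of the bit-oriented commands above against a throwaway key; the key name and values are illustrative.)

```python
conn.set('bits', b'\x00\xff')
conn.bitcount('bits')        # 8  -- set bits in the whole string
conn.bitcount('bits', 1, 1)  # 8  -- restrict the count to byte 1
conn.bitpos('bits', 1)       # 8  -- the first 1-bit lives in the second byte
conn.bitop('NOT', 'bits-inverted', 'bits')   # store the bitwise NOT in a new key
```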
+ """ + if bit not in (0, 1): + raise DataError('bit must be 0 or 1') + params = [key, bit] + + start is not None and params.append(start) + + if start is not None and end is not None: + params.append(end) + elif start is None and end is not None: + raise DataError("start argument is not set, " + "when end is specified") + return self.execute_command('BITPOS', *params) + + def decr(self, name, amount=1): + """ + Decrements the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as 0 - ``amount`` + """ + # An alias for ``decr()``, because it is already implemented + # as DECRBY redis command. + return self.decrby(name, amount) + + def decrby(self, name, amount=1): + """ + Decrements the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as 0 - ``amount`` + """ + return self.execute_command('DECRBY', name, amount) + + def delete(self, *names): + "Delete one or more keys specified by ``names``" + return self.execute_command('DEL', *names) + + def __delitem__(self, name): + self.delete(name) + + def dump(self, name): + """ + Return a serialized version of the value stored at the specified key. + If key does not exist a nil bulk reply is returned. + """ + return self.execute_command('DUMP', name) + + def exists(self, *names): + "Returns the number of ``names`` that exist" + return self.execute_command('EXISTS', *names) + __contains__ = exists + + def expire(self, name, time): + """ + Set an expire flag on key ``name`` for ``time`` seconds. ``time`` + can be represented by an integer or a Python timedelta object. + """ + if isinstance(time, datetime.timedelta): + time = int(time.total_seconds()) + return self.execute_command('EXPIRE', name, time) + + def expireat(self, name, when): + """ + Set an expire flag on key ``name``. ``when`` can be represented + as an integer indicating unix time or a Python datetime object. + """ + if isinstance(when, datetime.datetime): + when = int(mod_time.mktime(when.timetuple())) + return self.execute_command('EXPIREAT', name, when) + + def get(self, name): + """ + Return the value at key ``name``, or None if the key doesn't exist + """ + return self.execute_command('GET', name) + + def __getitem__(self, name): + """ + Return the value at key ``name``, raises a KeyError if the key + doesn't exist. + """ + value = self.get(name) + if value is not None: + return value + raise KeyError(name) + + def getbit(self, name, offset): + "Returns a boolean indicating the value of ``offset`` in ``name``" + return self.execute_command('GETBIT', name, offset) + + def getrange(self, key, start, end): + """ + Returns the substring of the string value stored at ``key``, + determined by the offsets ``start`` and ``end`` (both are inclusive) + """ + return self.execute_command('GETRANGE', key, start, end) + + def getset(self, name, value): + """ + Sets the value at key ``name`` to ``value`` + and returns the old value at key ``name`` atomically. + """ + return self.execute_command('GETSET', name, value) + + def incr(self, name, amount=1): + """ + Increments the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as ``amount`` + """ + return self.incrby(name, amount) + + def incrby(self, name, amount=1): + """ + Increments the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as ``amount`` + """ + # An alias for ``incr()``, because it is already implemented + # as INCRBY redis command. 
+ return self.execute_command('INCRBY', name, amount) + + def incrbyfloat(self, name, amount=1.0): + """ + Increments the value at key ``name`` by floating ``amount``. + If no key exists, the value will be initialized as ``amount`` + """ + return self.execute_command('INCRBYFLOAT', name, amount) + + def keys(self, pattern='*'): + "Returns a list of keys matching ``pattern``" + return self.execute_command('KEYS', pattern) + + def mget(self, keys, *args): + """ + Returns a list of values ordered identically to ``keys`` + """ + args = list_or_args(keys, args) + options = {} + if not args: + options[EMPTY_RESPONSE] = [] + return self.execute_command('MGET', *args, **options) + + def mset(self, mapping): + """ + Sets key/values based on a mapping. Mapping is a dictionary of + key/value pairs. Both keys and values should be strings or types that + can be cast to a string via str(). + """ + items = [] + for pair in iteritems(mapping): + items.extend(pair) + return self.execute_command('MSET', *items) + + def msetnx(self, mapping): + """ + Sets key/values based on a mapping if none of the keys are already set. + Mapping is a dictionary of key/value pairs. Both keys and values + should be strings or types that can be cast to a string via str(). + Returns a boolean indicating if the operation was successful. + """ + items = [] + for pair in iteritems(mapping): + items.extend(pair) + return self.execute_command('MSETNX', *items) + + def move(self, name, db): + "Moves the key ``name`` to a different Redis database ``db``" + return self.execute_command('MOVE', name, db) + + def persist(self, name): + "Removes an expiration on ``name``" + return self.execute_command('PERSIST', name) + + def pexpire(self, name, time): + """ + Set an expire flag on key ``name`` for ``time`` milliseconds. + ``time`` can be represented by an integer or a Python timedelta + object. + """ + if isinstance(time, datetime.timedelta): + time = int(time.total_seconds() * 1000) + return self.execute_command('PEXPIRE', name, time) + + def pexpireat(self, name, when): + """ + Set an expire flag on key ``name``. ``when`` can be represented + as an integer representing unix time in milliseconds (unix time * 1000) + or a Python datetime object. + """ + if isinstance(when, datetime.datetime): + ms = int(when.microsecond / 1000) + when = int(mod_time.mktime(when.timetuple())) * 1000 + ms + return self.execute_command('PEXPIREAT', name, when) + + def psetex(self, name, time_ms, value): + """ + Set the value of key ``name`` to ``value`` that expires in ``time_ms`` + milliseconds. ``time_ms`` can be represented by an integer or a Python + timedelta object + """ + if isinstance(time_ms, datetime.timedelta): + time_ms = int(time_ms.total_seconds() * 1000) + return self.execute_command('PSETEX', name, time_ms, value) + + def pttl(self, name): + "Returns the number of milliseconds until the key ``name`` will expire" + return self.execute_command('PTTL', name) + + def randomkey(self): + "Returns the name of a random key" + return self.execute_command('RANDOMKEY') + + def rename(self, src, dst): + """ + Rename key ``src`` to ``dst`` + """ + return self.execute_command('RENAME', src, dst) + + def renamenx(self, src, dst): + "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist" + return self.execute_command('RENAMENX', src, dst) + + def restore(self, name, ttl, value, replace=False): + """ + Create a key using the provided serialized value, previously obtained + using DUMP. 
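(Editor's aside: the expiry helpers above all accept either plain integers or `datetime.timedelta` values; a quick sketch with illustrative key names.)

```python
import datetime

conn.set('session:42', 'payload')
conn.expire('session:42', datetime.timedelta(minutes=30))    # EXPIRE, seconds
conn.pexpire('session:42', 1500)                             # PEXPIRE, milliseconds
conn.psetex('flash:42', datetime.timedelta(seconds=5), 'x')  # SET + ms TTL in one step
conn.pttl('session:42')                                      # remaining TTL in ms
```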
+ """ + params = [name, ttl, value] + if replace: + params.append('REPLACE') + return self.execute_command('RESTORE', *params) + + def set(self, name, value, + ex=None, px=None, nx=False, xx=False, keepttl=False): + """ + Set the value at key ``name`` to ``value`` + + ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds. + + ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds. + + ``nx`` if set to True, set the value at key ``name`` to ``value`` only + if it does not exist. + + ``xx`` if set to True, set the value at key ``name`` to ``value`` only + if it already exists. + + ``keepttl`` if True, retain the time to live associated with the key. + (Available since Redis 6.0) + """ + pieces = [name, value] + if ex is not None: + pieces.append('EX') + if isinstance(ex, datetime.timedelta): + ex = int(ex.total_seconds()) + pieces.append(ex) + if px is not None: + pieces.append('PX') + if isinstance(px, datetime.timedelta): + px = int(px.total_seconds() * 1000) + pieces.append(px) + + if nx: + pieces.append('NX') + if xx: + pieces.append('XX') + + if keepttl: + pieces.append('KEEPTTL') + + return self.execute_command('SET', *pieces) + + def __setitem__(self, name, value): + self.set(name, value) + + def setbit(self, name, offset, value): + """ + Flag the ``offset`` in ``name`` as ``value``. Returns a boolean + indicating the previous value of ``offset``. + """ + value = value and 1 or 0 + return self.execute_command('SETBIT', name, offset, value) + + def setex(self, name, time, value): + """ + Set the value of key ``name`` to ``value`` that expires in ``time`` + seconds. ``time`` can be represented by an integer or a Python + timedelta object. + """ + if isinstance(time, datetime.timedelta): + time = int(time.total_seconds()) + return self.execute_command('SETEX', name, time, value) + + def setnx(self, name, value): + "Set the value of key ``name`` to ``value`` if key doesn't exist" + return self.execute_command('SETNX', name, value) + + def setrange(self, name, offset, value): + """ + Overwrite bytes in the value of ``name`` starting at ``offset`` with + ``value``. If ``offset`` plus the length of ``value`` exceeds the + length of the original value, the new value will be larger than before. + If ``offset`` exceeds the length of the original value, null bytes + will be used to pad between the end of the previous value and the start + of what's being injected. + + Returns the length of the new string. + """ + return self.execute_command('SETRANGE', name, offset, value) + + def strlen(self, name): + "Return the number of bytes stored in the value of ``name``" + return self.execute_command('STRLEN', name) + + def substr(self, name, start, end=-1): + """ + Return a substring of the string at key ``name``. ``start`` and ``end`` + are 0-based integers specifying the portion of the string to return. + """ + return self.execute_command('SUBSTR', name, start, end) + + def touch(self, *args): + """ + Alters the last access time of a key(s) ``*args``. A key is ignored + if it does not exist. 
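(Editor's aside: the optional flags on `set()` above let it double as a simple lock primitive; a hedged sketch where 'lock:job' and the tokens are illustrative.)

```python
# NX + EX: set only if absent, with a 30 second TTL. Returns None when the
# key already existed (the 'SET' response callback maps a nil reply to None).
acquired = conn.set('lock:job', 'token-1', nx=True, ex=30)
if acquired:
    # XX + KEEPTTL (Redis 6+): overwrite only if present, preserving the TTL.
    conn.set('lock:job', 'token-2', xx=True, keepttl=True)
```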
+ """ + return self.execute_command('TOUCH', *args) + + def ttl(self, name): + "Returns the number of seconds until the key ``name`` will expire" + return self.execute_command('TTL', name) + + def type(self, name): + "Returns the type of key ``name``" + return self.execute_command('TYPE', name) + + def watch(self, *names): + """ + Watches the values at keys ``names``, or None if the key doesn't exist + """ + warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object')) + + def unwatch(self): + """ + Unwatches the value at key ``name``, or None of the key doesn't exist + """ + warnings.warn( + DeprecationWarning('Call UNWATCH from a Pipeline object')) + + def unlink(self, *names): + "Unlink one or more keys specified by ``names``" + return self.execute_command('UNLINK', *names) + + # LIST COMMANDS + def blpop(self, keys, timeout=0): + """ + LPOP a value off of the first non-empty list + named in the ``keys`` list. + + If none of the lists in ``keys`` has a value to LPOP, then block + for ``timeout`` seconds, or until a value gets pushed on to one + of the lists. + + If timeout is 0, then block indefinitely. + """ + if timeout is None: + timeout = 0 + keys = list_or_args(keys, None) + keys.append(timeout) + return self.execute_command('BLPOP', *keys) + + def brpop(self, keys, timeout=0): + """ + RPOP a value off of the first non-empty list + named in the ``keys`` list. + + If none of the lists in ``keys`` has a value to RPOP, then block + for ``timeout`` seconds, or until a value gets pushed on to one + of the lists. + + If timeout is 0, then block indefinitely. + """ + if timeout is None: + timeout = 0 + keys = list_or_args(keys, None) + keys.append(timeout) + return self.execute_command('BRPOP', *keys) + + def brpoplpush(self, src, dst, timeout=0): + """ + Pop a value off the tail of ``src``, push it on the head of ``dst`` + and then return it. + + This command blocks until a value is in ``src`` or until ``timeout`` + seconds elapse, whichever is first. A ``timeout`` value of 0 blocks + forever. + """ + if timeout is None: + timeout = 0 + return self.execute_command('BRPOPLPUSH', src, dst, timeout) + + def lindex(self, name, index): + """ + Return the item from list ``name`` at position ``index`` + + Negative indexes are supported and will return an item at the + end of the list + """ + return self.execute_command('LINDEX', name, index) + + def linsert(self, name, where, refvalue, value): + """ + Insert ``value`` in list ``name`` either immediately before or after + [``where``] ``refvalue`` + + Returns the new length of the list on success or -1 if ``refvalue`` + is not in the list. 
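(Editor's aside: the blocking list commands above are the building blocks of a simple work queue; an illustrative sketch, `conn` assumed connected.)

```python
conn.rpush('jobs', 'job-1', 'job-2')                 # producer side
item = conn.blpop(['jobs', 'jobs-low'], timeout=5)   # (b'jobs', b'job-1') or None
moved = conn.brpoplpush('jobs', 'jobs-active', timeout=1)  # b'job-2', kept on a backup list
```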
+ """ + return self.execute_command('LINSERT', name, where, refvalue, value) + + def llen(self, name): + "Return the length of the list ``name``" + return self.execute_command('LLEN', name) + + def lpop(self, name): + "Remove and return the first item of the list ``name``" + return self.execute_command('LPOP', name) + + def lpush(self, name, *values): + "Push ``values`` onto the head of the list ``name``" + return self.execute_command('LPUSH', name, *values) + + def lpushx(self, name, value): + "Push ``value`` onto the head of the list ``name`` if ``name`` exists" + return self.execute_command('LPUSHX', name, value) + + def lrange(self, name, start, end): + """ + Return a slice of the list ``name`` between + position ``start`` and ``end`` + + ``start`` and ``end`` can be negative numbers just like + Python slicing notation + """ + return self.execute_command('LRANGE', name, start, end) + + def lrem(self, name, count, value): + """ + Remove the first ``count`` occurrences of elements equal to ``value`` + from the list stored at ``name``. + + The count argument influences the operation in the following ways: + count > 0: Remove elements equal to value moving from head to tail. + count < 0: Remove elements equal to value moving from tail to head. + count = 0: Remove all elements equal to value. + """ + return self.execute_command('LREM', name, count, value) + + def lset(self, name, index, value): + "Set ``position`` of list ``name`` to ``value``" + return self.execute_command('LSET', name, index, value) + + def ltrim(self, name, start, end): + """ + Trim the list ``name``, removing all values not within the slice + between ``start`` and ``end`` + + ``start`` and ``end`` can be negative numbers just like + Python slicing notation + """ + return self.execute_command('LTRIM', name, start, end) + + def rpop(self, name): + "Remove and return the last item of the list ``name``" + return self.execute_command('RPOP', name) + + def rpoplpush(self, src, dst): + """ + RPOP a value off of the ``src`` list and atomically LPUSH it + on to the ``dst`` list. Returns the value. + """ + return self.execute_command('RPOPLPUSH', src, dst) + + def rpush(self, name, *values): + "Push ``values`` onto the tail of the list ``name``" + return self.execute_command('RPUSH', name, *values) + + def rpushx(self, name, value): + "Push ``value`` onto the tail of the list ``name`` if ``name`` exists" + return self.execute_command('RPUSHX', name, value) + + def sort(self, name, start=None, num=None, by=None, get=None, + desc=False, alpha=False, store=None, groups=False): + """ + Sort and return the list, set or sorted set at ``name``. + + ``start`` and ``num`` allow for paging through the sorted data + + ``by`` allows using an external key to weight and sort the items. + Use an "*" to indicate where in the key the item value is located + + ``get`` allows for returning items from external keys rather than the + sorted data itself. Use an "*" to indicate where in the key + the item value is located + + ``desc`` allows for reversing the sort + + ``alpha`` allows for sorting lexicographically rather than numerically + + ``store`` allows for storing the result of the sort into + the key ``store`` + + ``groups`` if set to True and if ``get`` contains at least two + elements, sort will return a list of tuples, each containing the + values fetched from the arguments to ``get``. 
+ + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise DataError("``start`` and ``num`` must both be specified") + + pieces = [name] + if by is not None: + pieces.append(b'BY') + pieces.append(by) + if start is not None and num is not None: + pieces.append(b'LIMIT') + pieces.append(start) + pieces.append(num) + if get is not None: + # If get is a string assume we want to get a single value. + # Otherwise assume it's an interable and we want to get multiple + # values. We can't just iterate blindly because strings are + # iterable. + if isinstance(get, (bytes, basestring)): + pieces.append(b'GET') + pieces.append(get) + else: + for g in get: + pieces.append(b'GET') + pieces.append(g) + if desc: + pieces.append(b'DESC') + if alpha: + pieces.append(b'ALPHA') + if store is not None: + pieces.append(b'STORE') + pieces.append(store) + + if groups: + if not get or isinstance(get, (bytes, basestring)) or len(get) < 2: + raise DataError('when using "groups" the "get" argument ' + 'must be specified and contain at least ' + 'two keys') + + options = {'groups': len(get) if groups else None} + return self.execute_command('SORT', *pieces, **options) + + # SCAN COMMANDS + def scan(self, cursor=0, match=None, count=None, _type=None): + """ + Incrementally return lists of key names. Also return a cursor + indicating the scan position. + + ``match`` allows for filtering the keys by pattern + + ``count`` provides a hint to Redis about the number of keys to + return per batch. + + ``_type`` filters the returned values by a particular Redis type. + Stock Redis instances allow for the following types: + HASH, LIST, SET, STREAM, STRING, ZSET + Additionally, Redis modules can expose other types as well. + """ + pieces = [cursor] + if match is not None: + pieces.extend([b'MATCH', match]) + if count is not None: + pieces.extend([b'COUNT', count]) + if _type is not None: + pieces.extend([b'TYPE', _type]) + return self.execute_command('SCAN', *pieces) + + def scan_iter(self, match=None, count=None, _type=None): + """ + Make an iterator using the SCAN command so that the client doesn't + need to remember the cursor position. + + ``match`` allows for filtering the keys by pattern + + ``count`` provides a hint to Redis about the number of keys to + return per batch. + + ``_type`` filters the returned values by a particular Redis type. + Stock Redis instances allow for the following types: + HASH, LIST, SET, STREAM, STRING, ZSET + Additionally, Redis modules can expose other types as well. + """ + cursor = '0' + while cursor != 0: + cursor, data = self.scan(cursor=cursor, match=match, + count=count, _type=_type) + for item in data: + yield item + + def sscan(self, name, cursor=0, match=None, count=None): + """ + Incrementally return lists of elements in a set. Also return a cursor + indicating the scan position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + pieces = [name, cursor] + if match is not None: + pieces.extend([b'MATCH', match]) + if count is not None: + pieces.extend([b'COUNT', count]) + return self.execute_command('SSCAN', *pieces) + + def sscan_iter(self, name, match=None, count=None): + """ + Make an iterator using the SSCAN command so that the client doesn't + need to remember the cursor position. 
+ + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + cursor = '0' + while cursor != 0: + cursor, data = self.sscan(name, cursor=cursor, + match=match, count=count) + for item in data: + yield item + + def hscan(self, name, cursor=0, match=None, count=None): + """ + Incrementally return key/value slices in a hash. Also return a cursor + indicating the scan position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + pieces = [name, cursor] + if match is not None: + pieces.extend([b'MATCH', match]) + if count is not None: + pieces.extend([b'COUNT', count]) + return self.execute_command('HSCAN', *pieces) + + def hscan_iter(self, name, match=None, count=None): + """ + Make an iterator using the HSCAN command so that the client doesn't + need to remember the cursor position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + cursor = '0' + while cursor != 0: + cursor, data = self.hscan(name, cursor=cursor, + match=match, count=count) + for item in data.items(): + yield item + + def zscan(self, name, cursor=0, match=None, count=None, + score_cast_func=float): + """ + Incrementally return lists of elements in a sorted set. Also return a + cursor indicating the scan position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + + ``score_cast_func`` a callable used to cast the score return value + """ + pieces = [name, cursor] + if match is not None: + pieces.extend([b'MATCH', match]) + if count is not None: + pieces.extend([b'COUNT', count]) + options = {'score_cast_func': score_cast_func} + return self.execute_command('ZSCAN', *pieces, **options) + + def zscan_iter(self, name, match=None, count=None, + score_cast_func=float): + """ + Make an iterator using the ZSCAN command so that the client doesn't + need to remember the cursor position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + + ``score_cast_func`` a callable used to cast the score return value + """ + cursor = '0' + while cursor != 0: + cursor, data = self.zscan(name, cursor=cursor, match=match, + count=count, + score_cast_func=score_cast_func) + for item in data: + yield item + + # SET COMMANDS + def sadd(self, name, *values): + "Add ``value(s)`` to set ``name``" + return self.execute_command('SADD', name, *values) + + def scard(self, name): + "Return the number of elements in set ``name``" + return self.execute_command('SCARD', name) + + def sdiff(self, keys, *args): + "Return the difference of sets specified by ``keys``" + args = list_or_args(keys, args) + return self.execute_command('SDIFF', *args) + + def sdiffstore(self, dest, keys, *args): + """ + Store the difference of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. + """ + args = list_or_args(keys, args) + return self.execute_command('SDIFFSTORE', dest, *args) + + def sinter(self, keys, *args): + "Return the intersection of sets specified by ``keys``" + args = list_or_args(keys, args) + return self.execute_command('SINTER', *args) + + def sinterstore(self, dest, keys, *args): + """ + Store the intersection of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. 
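+
+        Example (illustrative only):
+
+            r.sadd('s1', 'a', 'b')
+            r.sadd('s2', 'b', 'c')
+            r.sinterstore('both', ['s1', 's2'])  # -> 1; 'both' holds {'b'}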
+ """ + args = list_or_args(keys, args) + return self.execute_command('SINTERSTORE', dest, *args) + + def sismember(self, name, value): + "Return a boolean indicating if ``value`` is a member of set ``name``" + return self.execute_command('SISMEMBER', name, value) + + def smembers(self, name): + "Return all members of the set ``name``" + return self.execute_command('SMEMBERS', name) + + def smove(self, src, dst, value): + "Move ``value`` from set ``src`` to set ``dst`` atomically" + return self.execute_command('SMOVE', src, dst, value) + + def spop(self, name, count=None): + "Remove and return a random member of set ``name``" + args = (count is not None) and [count] or [] + return self.execute_command('SPOP', name, *args) + + def srandmember(self, name, number=None): + """ + If ``number`` is None, returns a random member of set ``name``. + + If ``number`` is supplied, returns a list of ``number`` random + members of set ``name``. Note this is only available when running + Redis 2.6+. + """ + args = (number is not None) and [number] or [] + return self.execute_command('SRANDMEMBER', name, *args) + + def srem(self, name, *values): + "Remove ``values`` from set ``name``" + return self.execute_command('SREM', name, *values) + + def sunion(self, keys, *args): + "Return the union of sets specified by ``keys``" + args = list_or_args(keys, args) + return self.execute_command('SUNION', *args) + + def sunionstore(self, dest, keys, *args): + """ + Store the union of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. + """ + args = list_or_args(keys, args) + return self.execute_command('SUNIONSTORE', dest, *args) + + # STREAMS COMMANDS + def xack(self, name, groupname, *ids): + """ + Acknowledges the successful processing of one or more messages. + name: name of the stream. + groupname: name of the consumer group. + *ids: message ids to acknowlege. + """ + return self.execute_command('XACK', name, groupname, *ids) + + def xadd(self, name, fields, id='*', maxlen=None, approximate=True): + """ + Add to a stream. + name: name of the stream + fields: dict of field/value pairs to insert into the stream + id: Location to insert this record. By default it is appended. + maxlen: truncate old stream members beyond this size + approximate: actual stream length may be slightly more than maxlen + + """ + pieces = [] + if maxlen is not None: + if not isinstance(maxlen, (int, long)) or maxlen < 1: + raise DataError('XADD maxlen must be a positive integer') + pieces.append(b'MAXLEN') + if approximate: + pieces.append(b'~') + pieces.append(str(maxlen)) + pieces.append(id) + if not isinstance(fields, dict) or len(fields) == 0: + raise DataError('XADD fields must be a non-empty dict') + for pair in iteritems(fields): + pieces.extend(pair) + return self.execute_command('XADD', name, *pieces) + + def xclaim(self, name, groupname, consumername, min_idle_time, message_ids, + idle=None, time=None, retrycount=None, force=False, + justid=False): + """ + Changes the ownership of a pending message. + name: name of the stream. + groupname: name of the consumer group. + consumername: name of a consumer that claims the message. + min_idle_time: filter messages that were idle less than this amount of + milliseconds + message_ids: non-empty list or tuple of message IDs to claim + idle: optional. Set the idle time (last time it was delivered) of the + message in ms + time: optional integer. 
This is the same as idle but instead of a + relative amount of milliseconds, it sets the idle time to a specific + Unix time (in milliseconds). + retrycount: optional integer. set the retry counter to the specified + value. This counter is incremented every time a message is delivered + again. + force: optional boolean, false by default. Creates the pending message + entry in the PEL even if certain specified IDs are not already in the + PEL assigned to a different client. + justid: optional boolean, false by default. Return just an array of IDs + of messages successfully claimed, without returning the actual message + """ + if not isinstance(min_idle_time, (int, long)) or min_idle_time < 0: + raise DataError("XCLAIM min_idle_time must be a non negative " + "integer") + if not isinstance(message_ids, (list, tuple)) or not message_ids: + raise DataError("XCLAIM message_ids must be a non empty list or " + "tuple of message IDs to claim") + + kwargs = {} + pieces = [name, groupname, consumername, str(min_idle_time)] + pieces.extend(list(message_ids)) + + if idle is not None: + if not isinstance(idle, (int, long)): + raise DataError("XCLAIM idle must be an integer") + pieces.extend((b'IDLE', str(idle))) + if time is not None: + if not isinstance(time, (int, long)): + raise DataError("XCLAIM time must be an integer") + pieces.extend((b'TIME', str(time))) + if retrycount is not None: + if not isinstance(retrycount, (int, long)): + raise DataError("XCLAIM retrycount must be an integer") + pieces.extend((b'RETRYCOUNT', str(retrycount))) + + if force: + if not isinstance(force, bool): + raise DataError("XCLAIM force must be a boolean") + pieces.append(b'FORCE') + if justid: + if not isinstance(justid, bool): + raise DataError("XCLAIM justid must be a boolean") + pieces.append(b'JUSTID') + kwargs['parse_justid'] = True + return self.execute_command('XCLAIM', *pieces, **kwargs) + + def xdel(self, name, *ids): + """ + Deletes one or more messages from a stream. + name: name of the stream. + *ids: message ids to delete. + """ + return self.execute_command('XDEL', name, *ids) + + def xgroup_create(self, name, groupname, id='$', mkstream=False): + """ + Create a new consumer group associated with a stream. + name: name of the stream. + groupname: name of the consumer group. + id: ID of the last item in the stream to consider already delivered. + """ + pieces = ['XGROUP CREATE', name, groupname, id] + if mkstream: + pieces.append(b'MKSTREAM') + return self.execute_command(*pieces) + + def xgroup_delconsumer(self, name, groupname, consumername): + """ + Remove a specific consumer from a consumer group. + Returns the number of pending messages that the consumer had before it + was deleted. + name: name of the stream. + groupname: name of the consumer group. + consumername: name of consumer to delete + """ + return self.execute_command('XGROUP DELCONSUMER', name, groupname, + consumername) + + def xgroup_destroy(self, name, groupname): + """ + Destroy a consumer group. + name: name of the stream. + groupname: name of the consumer group. + """ + return self.execute_command('XGROUP DESTROY', name, groupname) + + def xgroup_setid(self, name, groupname, id): + """ + Set the consumer group last delivered ID to something else. + name: name of the stream. + groupname: name of the consumer group. + id: ID of the last item in the stream to consider already delivered. 
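+
+        Example (illustrative only; stream and group names are hypothetical):
+
+            r.xadd('jobs', {'task': 'resize'})
+            r.xgroup_create('jobs', 'workers', id='$')
+            r.xgroup_setid('jobs', 'workers', '0')  # redeliver from the start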
+ """ + return self.execute_command('XGROUP SETID', name, groupname, id) + + def xinfo_consumers(self, name, groupname): + """ + Returns general information about the consumers in the group. + name: name of the stream. + groupname: name of the consumer group. + """ + return self.execute_command('XINFO CONSUMERS', name, groupname) + + def xinfo_groups(self, name): + """ + Returns general information about the consumer groups of the stream. + name: name of the stream. + """ + return self.execute_command('XINFO GROUPS', name) + + def xinfo_stream(self, name): + """ + Returns general information about the stream. + name: name of the stream. + """ + return self.execute_command('XINFO STREAM', name) + + def xlen(self, name): + """ + Returns the number of elements in a given stream. + """ + return self.execute_command('XLEN', name) + + def xpending(self, name, groupname): + """ + Returns information about pending messages of a group. + name: name of the stream. + groupname: name of the consumer group. + """ + return self.execute_command('XPENDING', name, groupname) + + def xpending_range(self, name, groupname, min, max, count, + consumername=None): + """ + Returns information about pending messages, in a range. + name: name of the stream. + groupname: name of the consumer group. + min: minimum stream ID. + max: maximum stream ID. + count: number of messages to return + consumername: name of a consumer to filter by (optional). + """ + pieces = [name, groupname] + if min is not None or max is not None or count is not None: + if min is None or max is None or count is None: + raise DataError("XPENDING must be provided with min, max " + "and count parameters, or none of them. ") + if not isinstance(count, (int, long)) or count < -1: + raise DataError("XPENDING count must be a integer >= -1") + pieces.extend((min, max, str(count))) + if consumername is not None: + if min is None or max is None or count is None: + raise DataError("if XPENDING is provided with consumername," + " it must be provided with min, max and" + " count parameters") + pieces.append(consumername) + return self.execute_command('XPENDING', *pieces, parse_detail=True) + + def xrange(self, name, min='-', max='+', count=None): + """ + Read stream values within an interval. + name: name of the stream. + start: first stream ID. defaults to '-', + meaning the earliest available. + finish: last stream ID. defaults to '+', + meaning the latest available. + count: if set, only return this many items, beginning with the + earliest available. + """ + pieces = [min, max] + if count is not None: + if not isinstance(count, (int, long)) or count < 1: + raise DataError('XRANGE count must be a positive integer') + pieces.append(b'COUNT') + pieces.append(str(count)) + + return self.execute_command('XRANGE', name, *pieces) + + def xread(self, streams, count=None, block=None): + """ + Block and monitor multiple streams for new data. + streams: a dict of stream names to stream IDs, where + IDs indicate the last ID already seen. + count: if set, only return this many items, beginning with the + earliest available. + block: number of milliseconds to wait, if nothing already present. 
+ """ + pieces = [] + if block is not None: + if not isinstance(block, (int, long)) or block < 0: + raise DataError('XREAD block must be a non-negative integer') + pieces.append(b'BLOCK') + pieces.append(str(block)) + if count is not None: + if not isinstance(count, (int, long)) or count < 1: + raise DataError('XREAD count must be a positive integer') + pieces.append(b'COUNT') + pieces.append(str(count)) + if not isinstance(streams, dict) or len(streams) == 0: + raise DataError('XREAD streams must be a non empty dict') + pieces.append(b'STREAMS') + keys, values = izip(*iteritems(streams)) + pieces.extend(keys) + pieces.extend(values) + return self.execute_command('XREAD', *pieces) + + def xreadgroup(self, groupname, consumername, streams, count=None, + block=None, noack=False): + """ + Read from a stream via a consumer group. + groupname: name of the consumer group. + consumername: name of the requesting consumer. + streams: a dict of stream names to stream IDs, where + IDs indicate the last ID already seen. + count: if set, only return this many items, beginning with the + earliest available. + block: number of milliseconds to wait, if nothing already present. + noack: do not add messages to the PEL + """ + pieces = [b'GROUP', groupname, consumername] + if count is not None: + if not isinstance(count, (int, long)) or count < 1: + raise DataError("XREADGROUP count must be a positive integer") + pieces.append(b'COUNT') + pieces.append(str(count)) + if block is not None: + if not isinstance(block, (int, long)) or block < 0: + raise DataError("XREADGROUP block must be a non-negative " + "integer") + pieces.append(b'BLOCK') + pieces.append(str(block)) + if noack: + pieces.append(b'NOACK') + if not isinstance(streams, dict) or len(streams) == 0: + raise DataError('XREADGROUP streams must be a non empty dict') + pieces.append(b'STREAMS') + pieces.extend(streams.keys()) + pieces.extend(streams.values()) + return self.execute_command('XREADGROUP', *pieces) + + def xrevrange(self, name, max='+', min='-', count=None): + """ + Read stream values within an interval, in reverse order. + name: name of the stream + start: first stream ID. defaults to '+', + meaning the latest available. + finish: last stream ID. defaults to '-', + meaning the earliest available. + count: if set, only return this many items, beginning with the + latest available. + """ + pieces = [max, min] + if count is not None: + if not isinstance(count, (int, long)) or count < 1: + raise DataError('XREVRANGE count must be a positive integer') + pieces.append(b'COUNT') + pieces.append(str(count)) + + return self.execute_command('XREVRANGE', name, *pieces) + + def xtrim(self, name, maxlen, approximate=True): + """ + Trims old messages from a stream. + name: name of the stream. + maxlen: truncate old stream messages beyond this size + approximate: actual stream length may be slightly more than maxlen + """ + pieces = [b'MAXLEN'] + if approximate: + pieces.append(b'~') + pieces.append(maxlen) + return self.execute_command('XTRIM', name, *pieces) + + # SORTED SET COMMANDS + def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False): + """ + Set any number of element-name, score pairs to the key ``name``. Pairs + are specified as a dict of element-names keys to score values. + + ``nx`` forces ZADD to only create new elements and not to update + scores for elements that already exist. + + ``xx`` forces ZADD to only update scores of elements that already + exist. New elements will not be added. 
+ + ``ch`` modifies the return value to be the numbers of elements changed. + Changed elements include new elements that were added and elements + whose scores changed. + + ``incr`` modifies ZADD to behave like ZINCRBY. In this mode only a + single element/score pair can be specified and the score is the amount + the existing score will be incremented by. When using this mode the + return value of ZADD will be the new score of the element. + + The return value of ZADD varies based on the mode specified. With no + options, ZADD returns the number of new elements added to the sorted + set. + """ + if not mapping: + raise DataError("ZADD requires at least one element/score pair") + if nx and xx: + raise DataError("ZADD allows either 'nx' or 'xx', not both") + if incr and len(mapping) != 1: + raise DataError("ZADD option 'incr' only works when passing a " + "single element/score pair") + pieces = [] + options = {} + if nx: + pieces.append(b'NX') + if xx: + pieces.append(b'XX') + if ch: + pieces.append(b'CH') + if incr: + pieces.append(b'INCR') + options['as_score'] = True + for pair in iteritems(mapping): + pieces.append(pair[1]) + pieces.append(pair[0]) + return self.execute_command('ZADD', name, *pieces, **options) + + def zcard(self, name): + "Return the number of elements in the sorted set ``name``" + return self.execute_command('ZCARD', name) + + def zcount(self, name, min, max): + """ + Returns the number of elements in the sorted set at key ``name`` with + a score between ``min`` and ``max``. + """ + return self.execute_command('ZCOUNT', name, min, max) + + def zincrby(self, name, amount, value): + "Increment the score of ``value`` in sorted set ``name`` by ``amount``" + return self.execute_command('ZINCRBY', name, amount, value) + + def zinterstore(self, dest, keys, aggregate=None): + """ + Intersect multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. + """ + return self._zaggregate('ZINTERSTORE', dest, keys, aggregate) + + def zlexcount(self, name, min, max): + """ + Return the number of items in the sorted set ``name`` between the + lexicographical range ``min`` and ``max``. + """ + return self.execute_command('ZLEXCOUNT', name, min, max) + + def zpopmax(self, name, count=None): + """ + Remove and return up to ``count`` members with the highest scores + from the sorted set ``name``. + """ + args = (count is not None) and [count] or [] + options = { + 'withscores': True + } + return self.execute_command('ZPOPMAX', name, *args, **options) + + def zpopmin(self, name, count=None): + """ + Remove and return up to ``count`` members with the lowest scores + from the sorted set ``name``. + """ + args = (count is not None) and [count] or [] + options = { + 'withscores': True + } + return self.execute_command('ZPOPMIN', name, *args, **options) + + def bzpopmax(self, keys, timeout=0): + """ + ZPOPMAX a value off of the first non-empty sorted set + named in the ``keys`` list. + + If none of the sorted sets in ``keys`` has a value to ZPOPMAX, + then block for ``timeout`` seconds, or until a member gets added + to one of the sorted sets. + + If timeout is 0, then block indefinitely. + """ + if timeout is None: + timeout = 0 + keys = list_or_args(keys, None) + keys.append(timeout) + return self.execute_command('BZPOPMAX', *keys) + + def bzpopmin(self, keys, timeout=0): + """ + ZPOPMIN a value off of the first non-empty sorted set + named in the ``keys`` list. 
+ + If none of the sorted sets in ``keys`` has a value to ZPOPMIN, + then block for ``timeout`` seconds, or until a member gets added + to one of the sorted sets. + + If timeout is 0, then block indefinitely. + """ + if timeout is None: + timeout = 0 + keys = list_or_args(keys, None) + keys.append(timeout) + return self.execute_command('BZPOPMIN', *keys) + + def zrange(self, name, start, end, desc=False, withscores=False, + score_cast_func=float): + """ + Return a range of values from sorted set ``name`` between + ``start`` and ``end`` sorted in ascending order. + + ``start`` and ``end`` can be negative, indicating the end of the range. + + ``desc`` a boolean indicating whether to sort the results descendingly + + ``withscores`` indicates to return the scores along with the values. + The return type is a list of (value, score) pairs + + ``score_cast_func`` a callable used to cast the score return value + """ + if desc: + return self.zrevrange(name, start, end, withscores, + score_cast_func) + pieces = ['ZRANGE', name, start, end] + if withscores: + pieces.append(b'WITHSCORES') + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrangebylex(self, name, min, max, start=None, num=None): + """ + Return the lexicographical range of values from sorted set ``name`` + between ``min`` and ``max``. + + If ``start`` and ``num`` are specified, then return a slice of the + range. + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise DataError("``start`` and ``num`` must both be specified") + pieces = ['ZRANGEBYLEX', name, min, max] + if start is not None and num is not None: + pieces.extend([b'LIMIT', start, num]) + return self.execute_command(*pieces) + + def zrevrangebylex(self, name, max, min, start=None, num=None): + """ + Return the reversed lexicographical range of values from sorted set + ``name`` between ``max`` and ``min``. + + If ``start`` and ``num`` are specified, then return a slice of the + range. + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise DataError("``start`` and ``num`` must both be specified") + pieces = ['ZREVRANGEBYLEX', name, max, min] + if start is not None and num is not None: + pieces.extend([b'LIMIT', start, num]) + return self.execute_command(*pieces) + + def zrangebyscore(self, name, min, max, start=None, num=None, + withscores=False, score_cast_func=float): + """ + Return a range of values from the sorted set ``name`` with scores + between ``min`` and ``max``. + + If ``start`` and ``num`` are specified, then return a slice + of the range. + + ``withscores`` indicates to return the scores along with the values. 
+ The return type is a list of (value, score) pairs + + `score_cast_func`` a callable used to cast the score return value + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise DataError("``start`` and ``num`` must both be specified") + pieces = ['ZRANGEBYSCORE', name, min, max] + if start is not None and num is not None: + pieces.extend([b'LIMIT', start, num]) + if withscores: + pieces.append(b'WITHSCORES') + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrank(self, name, value): + """ + Returns a 0-based value indicating the rank of ``value`` in sorted set + ``name`` + """ + return self.execute_command('ZRANK', name, value) + + def zrem(self, name, *values): + "Remove member ``values`` from sorted set ``name``" + return self.execute_command('ZREM', name, *values) + + def zremrangebylex(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` between the + lexicographical range specified by ``min`` and ``max``. + + Returns the number of elements removed. + """ + return self.execute_command('ZREMRANGEBYLEX', name, min, max) + + def zremrangebyrank(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` with ranks between + ``min`` and ``max``. Values are 0-based, ordered from smallest score + to largest. Values can be negative indicating the highest scores. + Returns the number of elements removed + """ + return self.execute_command('ZREMRANGEBYRANK', name, min, max) + + def zremrangebyscore(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` with scores + between ``min`` and ``max``. Returns the number of elements removed. + """ + return self.execute_command('ZREMRANGEBYSCORE', name, min, max) + + def zrevrange(self, name, start, end, withscores=False, + score_cast_func=float): + """ + Return a range of values from sorted set ``name`` between + ``start`` and ``end`` sorted in descending order. + + ``start`` and ``end`` can be negative, indicating the end of the range. + + ``withscores`` indicates to return the scores along with the values + The return type is a list of (value, score) pairs + + ``score_cast_func`` a callable used to cast the score return value + """ + pieces = ['ZREVRANGE', name, start, end] + if withscores: + pieces.append(b'WITHSCORES') + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrevrangebyscore(self, name, max, min, start=None, num=None, + withscores=False, score_cast_func=float): + """ + Return a range of values from the sorted set ``name`` with scores + between ``min`` and ``max`` in descending order. + + If ``start`` and ``num`` are specified, then return a slice + of the range. + + ``withscores`` indicates to return the scores along with the values. 
+ The return type is a list of (value, score) pairs + + ``score_cast_func`` a callable used to cast the score return value + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise DataError("``start`` and ``num`` must both be specified") + pieces = ['ZREVRANGEBYSCORE', name, max, min] + if start is not None and num is not None: + pieces.extend([b'LIMIT', start, num]) + if withscores: + pieces.append(b'WITHSCORES') + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrevrank(self, name, value): + """ + Returns a 0-based value indicating the descending rank of + ``value`` in sorted set ``name`` + """ + return self.execute_command('ZREVRANK', name, value) + + def zscore(self, name, value): + "Return the score of element ``value`` in sorted set ``name``" + return self.execute_command('ZSCORE', name, value) + + def zunionstore(self, dest, keys, aggregate=None): + """ + Union multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. + """ + return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate) + + def _zaggregate(self, command, dest, keys, aggregate=None): + pieces = [command, dest, len(keys)] + if isinstance(keys, dict): + keys, weights = iterkeys(keys), itervalues(keys) + else: + weights = None + pieces.extend(keys) + if weights: + pieces.append(b'WEIGHTS') + pieces.extend(weights) + if aggregate: + pieces.append(b'AGGREGATE') + pieces.append(aggregate) + return self.execute_command(*pieces) + + # HYPERLOGLOG COMMANDS + def pfadd(self, name, *values): + "Adds the specified elements to the specified HyperLogLog." + return self.execute_command('PFADD', name, *values) + + def pfcount(self, *sources): + """ + Return the approximated cardinality of + the set observed by the HyperLogLog at key(s). + """ + return self.execute_command('PFCOUNT', *sources) + + def pfmerge(self, dest, *sources): + "Merge N different HyperLogLogs into a single one." 
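+        # Illustrative use (key names are hypothetical):
+        #   r.pfadd('visits:mon', 'u1', 'u2')
+        #   r.pfadd('visits:tue', 'u2', 'u3')
+        #   r.pfmerge('visits:week', 'visits:mon', 'visits:tue')
+        #   r.pfcount('visits:week')  # -> approximately 3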
+        return self.execute_command('PFMERGE', dest, *sources)
+
+    # HASH COMMANDS
+    def hdel(self, name, *keys):
+        "Delete ``keys`` from hash ``name``"
+        return self.execute_command('HDEL', name, *keys)
+
+    def hexists(self, name, key):
+        "Returns a boolean indicating if ``key`` exists within hash ``name``"
+        return self.execute_command('HEXISTS', name, key)
+
+    def hget(self, name, key):
+        "Return the value of ``key`` within the hash ``name``"
+        return self.execute_command('HGET', name, key)
+
+    def hgetall(self, name):
+        "Return a Python dict of the hash's name/value pairs"
+        return self.execute_command('HGETALL', name)
+
+    def hincrby(self, name, key, amount=1):
+        "Increment the value of ``key`` in hash ``name`` by ``amount``"
+        return self.execute_command('HINCRBY', name, key, amount)
+
+    def hincrbyfloat(self, name, key, amount=1.0):
+        """
+        Increment the value of ``key`` in hash ``name`` by floating point
+        ``amount``
+        """
+        return self.execute_command('HINCRBYFLOAT', name, key, amount)
+
+    def hkeys(self, name):
+        "Return the list of keys within hash ``name``"
+        return self.execute_command('HKEYS', name)
+
+    def hlen(self, name):
+        "Return the number of elements in hash ``name``"
+        return self.execute_command('HLEN', name)
+
+    def hset(self, name, key=None, value=None, mapping=None):
+        """
+        Set ``key`` to ``value`` within hash ``name``.
+        ``mapping`` accepts a dict of key/value pairs that will be
+        added to hash ``name``.
+        Returns the number of fields that were added.
+        """
+        if key is None and not mapping:
+            raise DataError("'hset' with no key value pairs")
+        items = []
+        if key is not None:
+            items.extend((key, value))
+        if mapping:
+            for pair in mapping.items():
+                items.extend(pair)
+
+        return self.execute_command('HSET', name, *items)
+
+    def hsetnx(self, name, key, value):
+        """
+        Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
+        exist. Returns 1 if HSETNX created a field, otherwise 0.
+        """
+        return self.execute_command('HSETNX', name, key, value)
+
+    def hmset(self, name, mapping):
+        """
+        Set key to value within hash ``name`` for each corresponding
+        key and value from the ``mapping`` dict.
+        """
+        warnings.warn(
+            '%s.hmset() is deprecated. Use %s.hset() instead.'
+            % (self.__class__.__name__, self.__class__.__name__),
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        if not mapping:
+            raise DataError("'hmset' with 'mapping' of length 0")
+        items = []
+        for pair in iteritems(mapping):
+            items.extend(pair)
+        return self.execute_command('HMSET', name, *items)
+
+    def hmget(self, name, keys, *args):
+        "Returns a list of values ordered identically to ``keys``"
+        args = list_or_args(keys, args)
+        return self.execute_command('HMGET', name, *args)
+
+    def hvals(self, name):
+        "Return the list of values within hash ``name``"
+        return self.execute_command('HVALS', name)
+
+    def hstrlen(self, name, key):
+        """
+        Return the number of bytes stored in the value of ``key``
+        within hash ``name``
+        """
+        return self.execute_command('HSTRLEN', name, key)
+
+    def publish(self, channel, message):
+        """
+        Publish ``message`` on ``channel``.
+        Returns the number of subscribers the message was delivered to.
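+
+        Example (illustrative only; assumes another client is subscribed):
+
+            p = r.pubsub()
+            p.subscribe('alerts')
+            r.publish('alerts', 'disk full')  # -> count of receiving clients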
+ """ + return self.execute_command('PUBLISH', channel, message) + + def pubsub_channels(self, pattern='*'): + """ + Return a list of channels that have at least one subscriber + """ + return self.execute_command('PUBSUB CHANNELS', pattern) + + def pubsub_numpat(self): + """ + Returns the number of subscriptions to patterns + """ + return self.execute_command('PUBSUB NUMPAT') + + def pubsub_numsub(self, *args): + """ + Return a list of (channel, number of subscribers) tuples + for each channel given in ``*args`` + """ + return self.execute_command('PUBSUB NUMSUB', *args) + + def cluster(self, cluster_arg, *args): + return self.execute_command('CLUSTER %s' % cluster_arg.upper(), *args) + + def eval(self, script, numkeys, *keys_and_args): + """ + Execute the Lua ``script``, specifying the ``numkeys`` the script + will touch and the key names and argument values in ``keys_and_args``. + Returns the result of the script. + + In practice, use the object returned by ``register_script``. This + function exists purely for Redis API completion. + """ + return self.execute_command('EVAL', script, numkeys, *keys_and_args) + + def evalsha(self, sha, numkeys, *keys_and_args): + """ + Use the ``sha`` to execute a Lua script already registered via EVAL + or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the + key names and argument values in ``keys_and_args``. Returns the result + of the script. + + In practice, use the object returned by ``register_script``. This + function exists purely for Redis API completion. + """ + return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args) + + def script_exists(self, *args): + """ + Check if a script exists in the script cache by specifying the SHAs of + each script as ``args``. Returns a list of boolean values indicating if + if each already script exists in the cache. + """ + return self.execute_command('SCRIPT EXISTS', *args) + + def script_flush(self): + "Flush all scripts from the script cache" + return self.execute_command('SCRIPT FLUSH') + + def script_kill(self): + "Kill the currently executing Lua script" + return self.execute_command('SCRIPT KILL') + + def script_load(self, script): + "Load a Lua ``script`` into the script cache. Returns the SHA." + return self.execute_command('SCRIPT LOAD', script) + + def register_script(self, script): + """ + Register a Lua ``script`` specifying the ``keys`` it will touch. + Returns a Script object that is callable and hides the complexity of + deal with scripts, keys, and shas. This is the preferred way to work + with Lua scripts. + """ + return Script(self, script) + + # GEO COMMANDS + def geoadd(self, name, *values): + """ + Add the specified geospatial items to the specified key identified + by the ``name`` argument. The Geospatial items are given as ordered + members of the ``values`` argument, each item or place is formed by + the triad longitude, latitude and name. + """ + if len(values) % 3 != 0: + raise DataError("GEOADD requires places with lon, lat and name" + " values") + return self.execute_command('GEOADD', name, *values) + + def geodist(self, name, place1, place2, unit=None): + """ + Return the distance between ``place1`` and ``place2`` members of the + ``name`` key. + The units must be one of the following : m, km mi, ft. By default + meters are used. 
+ """ + pieces = [name, place1, place2] + if unit and unit not in ('m', 'km', 'mi', 'ft'): + raise DataError("GEODIST invalid unit") + elif unit: + pieces.append(unit) + return self.execute_command('GEODIST', *pieces) + + def geohash(self, name, *values): + """ + Return the geo hash string for each item of ``values`` members of + the specified key identified by the ``name`` argument. + """ + return self.execute_command('GEOHASH', name, *values) + + def geopos(self, name, *values): + """ + Return the positions of each item of ``values`` as members of + the specified key identified by the ``name`` argument. Each position + is represented by the pairs lon and lat. + """ + return self.execute_command('GEOPOS', name, *values) + + def georadius(self, name, longitude, latitude, radius, unit=None, + withdist=False, withcoord=False, withhash=False, count=None, + sort=None, store=None, store_dist=None): + """ + Return the members of the specified key identified by the + ``name`` argument which are within the borders of the area specified + with the ``latitude`` and ``longitude`` location and the maximum + distance from the center specified by the ``radius`` value. + + The units must be one of the following : m, km mi, ft. By default + + ``withdist`` indicates to return the distances of each place. + + ``withcoord`` indicates to return the latitude and longitude of + each place. + + ``withhash`` indicates to return the geohash string of each place. + + ``count`` indicates to return the number of elements up to N. + + ``sort`` indicates to return the places in a sorted way, ASC for + nearest to fairest and DESC for fairest to nearest. + + ``store`` indicates to save the places names in a sorted set named + with a specific key, each element of the destination sorted set is + populated with the score got from the original geo sorted set. + + ``store_dist`` indicates to save the places names in a sorted set + named with a specific key, instead of ``store`` the sorted set + destination score is set with the distance. + """ + return self._georadiusgeneric('GEORADIUS', + name, longitude, latitude, radius, + unit=unit, withdist=withdist, + withcoord=withcoord, withhash=withhash, + count=count, sort=sort, store=store, + store_dist=store_dist) + + def georadiusbymember(self, name, member, radius, unit=None, + withdist=False, withcoord=False, withhash=False, + count=None, sort=None, store=None, store_dist=None): + """ + This command is exactly like ``georadius`` with the sole difference + that instead of taking, as the center of the area to query, a longitude + and latitude value, it takes the name of a member already existing + inside the geospatial index represented by the sorted set. 
+ """ + return self._georadiusgeneric('GEORADIUSBYMEMBER', + name, member, radius, unit=unit, + withdist=withdist, withcoord=withcoord, + withhash=withhash, count=count, + sort=sort, store=store, + store_dist=store_dist) + + def _georadiusgeneric(self, command, *args, **kwargs): + pieces = list(args) + if kwargs['unit'] and kwargs['unit'] not in ('m', 'km', 'mi', 'ft'): + raise DataError("GEORADIUS invalid unit") + elif kwargs['unit']: + pieces.append(kwargs['unit']) + else: + pieces.append('m',) + + for arg_name, byte_repr in ( + ('withdist', b'WITHDIST'), + ('withcoord', b'WITHCOORD'), + ('withhash', b'WITHHASH')): + if kwargs[arg_name]: + pieces.append(byte_repr) + + if kwargs['count']: + pieces.extend([b'COUNT', kwargs['count']]) + + if kwargs['sort']: + if kwargs['sort'] == 'ASC': + pieces.append(b'ASC') + elif kwargs['sort'] == 'DESC': + pieces.append(b'DESC') + else: + raise DataError("GEORADIUS invalid sort") + + if kwargs['store'] and kwargs['store_dist']: + raise DataError("GEORADIUS store and store_dist cant be set" + " together") + + if kwargs['store']: + pieces.extend([b'STORE', kwargs['store']]) + + if kwargs['store_dist']: + pieces.extend([b'STOREDIST', kwargs['store_dist']]) + + return self.execute_command(command, *pieces, **kwargs) + + +StrictRedis = Redis + + +class Monitor(object): + """ + Monitor is useful for handling the MONITOR command to the redis server. + next_command() method returns one command from monitor + listen() method yields commands from monitor. + """ + monitor_re = re.compile(r'\[(\d+) (.*)\] (.*)') + command_re = re.compile(r'"(.*?)(? conn.next_health_check: + conn.send_command('PING', self.HEALTH_CHECK_MESSAGE, + check_health=False) + + def _normalize_keys(self, data): + """ + normalize channel/pattern names to be either bytes or strings + based on whether responses are automatically decoded. this saves us + from coercing the value for each message coming in. + """ + encode = self.encoder.encode + decode = self.encoder.decode + return {decode(encode(k)): v for k, v in iteritems(data)} + + def psubscribe(self, *args, **kwargs): + """ + Subscribe to channel patterns. Patterns supplied as keyword arguments + expect a pattern name as the key and a callable as the value. A + pattern's callable will be invoked automatically when a message is + received on that pattern rather than producing a message via + ``listen()``. + """ + if args: + args = list_or_args(args[0], args[1:]) + new_patterns = dict.fromkeys(args) + new_patterns.update(kwargs) + ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns)) + # update the patterns dict AFTER we send the command. we don't want to + # subscribe twice to these patterns, once for the command and again + # for the reconnection. + new_patterns = self._normalize_keys(new_patterns) + self.patterns.update(new_patterns) + self.pending_unsubscribe_patterns.difference_update(new_patterns) + return ret_val + + def punsubscribe(self, *args): + """ + Unsubscribe from the supplied patterns. If empty, unsubscribe from + all patterns. + """ + if args: + args = list_or_args(args[0], args[1:]) + patterns = self._normalize_keys(dict.fromkeys(args)) + else: + patterns = self.patterns + self.pending_unsubscribe_patterns.update(patterns) + return self.execute_command('PUNSUBSCRIBE', *args) + + def subscribe(self, *args, **kwargs): + """ + Subscribe to channels. Channels supplied as keyword arguments expect + a channel name as the key and a callable as the value. 
A channel's + callable will be invoked automatically when a message is received on + that channel rather than producing a message via ``listen()`` or + ``get_message()``. + """ + if args: + args = list_or_args(args[0], args[1:]) + new_channels = dict.fromkeys(args) + new_channels.update(kwargs) + ret_val = self.execute_command('SUBSCRIBE', *iterkeys(new_channels)) + # update the channels dict AFTER we send the command. we don't want to + # subscribe twice to these channels, once for the command and again + # for the reconnection. + new_channels = self._normalize_keys(new_channels) + self.channels.update(new_channels) + self.pending_unsubscribe_channels.difference_update(new_channels) + return ret_val + + def unsubscribe(self, *args): + """ + Unsubscribe from the supplied channels. If empty, unsubscribe from + all channels + """ + if args: + args = list_or_args(args[0], args[1:]) + channels = self._normalize_keys(dict.fromkeys(args)) + else: + channels = self.channels + self.pending_unsubscribe_channels.update(channels) + return self.execute_command('UNSUBSCRIBE', *args) + + def listen(self): + "Listen for messages on channels this client has been subscribed to" + while self.subscribed: + response = self.handle_message(self.parse_response(block=True)) + if response is not None: + yield response + + def get_message(self, ignore_subscribe_messages=False, timeout=0): + """ + Get the next message if one is available, otherwise None. + + If timeout is specified, the system will wait for `timeout` seconds + before returning. Timeout should be specified as a floating point + number. + """ + response = self.parse_response(block=False, timeout=timeout) + if response: + return self.handle_message(response, ignore_subscribe_messages) + return None + + def ping(self, message=None): + """ + Ping the Redis server + """ + message = '' if message is None else message + return self.execute_command('PING', message) + + def handle_message(self, response, ignore_subscribe_messages=False): + """ + Parses a pub/sub message. If the channel or pattern was subscribed to + with a message handler, the handler is invoked instead of a parsed + message being returned. + """ + message_type = nativestr(response[0]) + if message_type == 'pmessage': + message = { + 'type': message_type, + 'pattern': response[1], + 'channel': response[2], + 'data': response[3] + } + elif message_type == 'pong': + message = { + 'type': message_type, + 'pattern': None, + 'channel': None, + 'data': response[1] + } + else: + message = { + 'type': message_type, + 'pattern': None, + 'channel': response[1], + 'data': response[2] + } + + # if this is an unsubscribe message, remove it from memory + if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES: + if message_type == 'punsubscribe': + pattern = response[1] + if pattern in self.pending_unsubscribe_patterns: + self.pending_unsubscribe_patterns.remove(pattern) + self.patterns.pop(pattern, None) + else: + channel = response[1] + if channel in self.pending_unsubscribe_channels: + self.pending_unsubscribe_channels.remove(channel) + self.channels.pop(channel, None) + + if message_type in self.PUBLISH_MESSAGE_TYPES: + # if there's a message handler, invoke it + if message_type == 'pmessage': + handler = self.patterns.get(message['pattern'], None) + else: + handler = self.channels.get(message['channel'], None) + if handler: + handler(message) + return None + elif message_type != 'pong': + # this is a subscribe/unsubscribe message. 
ignore if we don't + # want them + if ignore_subscribe_messages or self.ignore_subscribe_messages: + return None + + return message + + def run_in_thread(self, sleep_time=0, daemon=False): + for channel, handler in iteritems(self.channels): + if handler is None: + raise PubSubError("Channel: '%s' has no handler registered" % + channel) + for pattern, handler in iteritems(self.patterns): + if handler is None: + raise PubSubError("Pattern: '%s' has no handler registered" % + pattern) + + thread = PubSubWorkerThread(self, sleep_time, daemon=daemon) + thread.start() + return thread + + +class PubSubWorkerThread(threading.Thread): + def __init__(self, pubsub, sleep_time, daemon=False): + super(PubSubWorkerThread, self).__init__() + self.daemon = daemon + self.pubsub = pubsub + self.sleep_time = sleep_time + self._running = threading.Event() + + def run(self): + if self._running.is_set(): + return + self._running.set() + pubsub = self.pubsub + sleep_time = self.sleep_time + while self._running.is_set(): + pubsub.get_message(ignore_subscribe_messages=True, + timeout=sleep_time) + pubsub.close() + + def stop(self): + # trip the flag so the run loop exits. the run loop will + # close the pubsub connection, which disconnects the socket + # and returns the connection to the pool. + self._running.clear() + + +class Pipeline(Redis): + """ + Pipelines provide a way to transmit multiple commands to the Redis server + in one transmission. This is convenient for batch processing, such as + saving all the values in a list to Redis. + + All commands executed within a pipeline are wrapped with MULTI and EXEC + calls. This guarantees all commands executed in the pipeline will be + executed atomically. + + Any command raising an exception does *not* halt the execution of + subsequent commands in the pipeline. Instead, the exception is caught + and its instance is placed into the response list returned by execute(). + Code iterating over the response list should be able to deal with an + instance of an exception as a potential value. In general, these will be + ResponseError exceptions, such as those raised when issuing a command + on a key of a different datatype. 
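+
+    A minimal usage sketch (key name is illustrative; assumes a connected
+    client ``r``):
+
+        with r.pipeline() as pipe:
+            pipe.set('counter', 1)
+            pipe.incr('counter')
+            results = pipe.execute()  # -> [True, 2]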
+ """ + + UNWATCH_COMMANDS = {'DISCARD', 'EXEC', 'UNWATCH'} + + def __init__(self, connection_pool, response_callbacks, transaction, + shard_hint): + self.connection_pool = connection_pool + self.connection = None + self.response_callbacks = response_callbacks + self.transaction = transaction + self.shard_hint = shard_hint + + self.watching = False + self.reset() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.reset() + + def __del__(self): + self.reset() + + def __len__(self): + return len(self.command_stack) + + def __nonzero__(self): + "Pipeline instances should always evaluate to True on Python 2.7" + return True + + def __bool__(self): + "Pipeline instances should always evaluate to True on Python 3+" + return True + + def reset(self): + self.command_stack = [] + self.scripts = set() + # make sure to reset the connection state in the event that we were + # watching something + if self.watching and self.connection: + try: + # call this manually since our unwatch or + # immediate_execute_command methods can call reset() + self.connection.send_command('UNWATCH') + self.connection.read_response() + except ConnectionError: + # disconnect will also remove any previous WATCHes + self.connection.disconnect() + # clean up the other instance attributes + self.watching = False + self.explicit_transaction = False + # we can safely return the connection to the pool here since we're + # sure we're no longer WATCHing anything + if self.connection: + self.connection_pool.release(self.connection) + self.connection = None + + def multi(self): + """ + Start a transactional block of the pipeline after WATCH commands + are issued. End the transactional block with `execute`. + """ + if self.explicit_transaction: + raise RedisError('Cannot issue nested calls to MULTI') + if self.command_stack: + raise RedisError('Commands without an initial WATCH have already ' + 'been issued') + self.explicit_transaction = True + + def execute_command(self, *args, **kwargs): + if (self.watching or args[0] == 'WATCH') and \ + not self.explicit_transaction: + return self.immediate_execute_command(*args, **kwargs) + return self.pipeline_execute_command(*args, **kwargs) + + def immediate_execute_command(self, *args, **options): + """ + Execute a command immediately, but don't auto-retry on a + ConnectionError if we're already WATCHing a variable. Used when + issuing WATCH or subsequent commands retrieving their values but before + MULTI is called. + """ + command_name = args[0] + conn = self.connection + # if this is the first call, we need a connection + if not conn: + conn = self.connection_pool.get_connection(command_name, + self.shard_hint) + self.connection = conn + try: + conn.send_command(*args) + return self.parse_response(conn, command_name, **options) + except (ConnectionError, TimeoutError) as e: + conn.disconnect() + # if we were already watching a variable, the watch is no longer + # valid since this connection has died. raise a WatchError, which + # indicates the user should retry this transaction. + if self.watching: + self.reset() + raise WatchError("A ConnectionError occurred on while " + "watching one or more keys") + # if retry_on_timeout is not set, or the error is not + # a TimeoutError, raise it + if not (conn.retry_on_timeout and isinstance(e, TimeoutError)): + self.reset() + raise + + # retry_on_timeout is set, this is a TimeoutError and we are not + # already WATCHing any variables. retry the command. 
+ try: + conn.send_command(*args) + return self.parse_response(conn, command_name, **options) + except (ConnectionError, TimeoutError): + # a subsequent failure should simply be raised + self.reset() + raise + + def pipeline_execute_command(self, *args, **options): + """ + Stage a command to be executed when execute() is next called + + Returns the current Pipeline object back so commands can be + chained together, such as: + + pipe = pipe.set('foo', 'bar').incr('baz').decr('bang') + + At some other point, you can then run: pipe.execute(), + which will execute all commands queued in the pipe. + """ + self.command_stack.append((args, options)) + return self + + def _execute_transaction(self, connection, commands, raise_on_error): + cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})]) + all_cmds = connection.pack_commands([args for args, options in cmds + if EMPTY_RESPONSE not in options]) + connection.send_packed_command(all_cmds) + errors = [] + + # parse off the response for MULTI + # NOTE: we need to handle ResponseErrors here and continue + # so that we read all the additional command messages from + # the socket + try: + self.parse_response(connection, '_') + except ResponseError as e: + errors.append((0, e)) + + # and all the other commands + for i, command in enumerate(commands): + if EMPTY_RESPONSE in command[1]: + errors.append((i, command[1][EMPTY_RESPONSE])) + else: + try: + self.parse_response(connection, '_') + except ResponseError as e: + self.annotate_exception(e, i + 1, command[0]) + errors.append((i, e)) + + # parse the EXEC. + try: + response = self.parse_response(connection, '_') + except ExecAbortError: + if errors: + raise errors[0][1] + raise + + # EXEC clears any watched keys + self.watching = False + + if response is None: + raise WatchError("Watched variable changed.") + + # put any parse errors into the response + for i, e in errors: + response.insert(i, e) + + if len(response) != len(commands): + self.connection.disconnect() + raise ResponseError("Wrong number of response items from " + "pipeline execution") + + # find any errors in the response and raise if necessary + if raise_on_error: + self.raise_first_error(commands, response) + + # We have to run response callbacks manually + data = [] + for r, cmd in izip(response, commands): + if not isinstance(r, Exception): + args, options = cmd + command_name = args[0] + if command_name in self.response_callbacks: + r = self.response_callbacks[command_name](r, **options) + data.append(r) + return data + + def _execute_pipeline(self, connection, commands, raise_on_error): + # build up all commands into a single request to increase network perf + all_cmds = connection.pack_commands([args for args, _ in commands]) + connection.send_packed_command(all_cmds) + + response = [] + for args, options in commands: + try: + response.append( + self.parse_response(connection, args[0], **options)) + except ResponseError as e: + response.append(e) + + if raise_on_error: + self.raise_first_error(commands, response) + return response + + def raise_first_error(self, commands, response): + for i, r in enumerate(response): + if isinstance(r, ResponseError): + self.annotate_exception(r, i + 1, commands[i][0]) + raise r + + def annotate_exception(self, exception, number, command): + cmd = ' '.join(imap(safe_unicode, command)) + msg = 'Command # %d (%s) of pipeline caused error: %s' % ( + number, cmd, safe_unicode(exception.args[0])) + exception.args = (msg,) + exception.args[1:] + + def parse_response(self, connection, 
command_name, **options):
+        result = Redis.parse_response(
+            self, connection, command_name, **options)
+        if command_name in self.UNWATCH_COMMANDS:
+            self.watching = False
+        elif command_name == 'WATCH':
+            self.watching = True
+        return result
+
+    def load_scripts(self):
+        # make sure all scripts that are about to be run on this pipeline exist
+        scripts = list(self.scripts)
+        immediate = self.immediate_execute_command
+        shas = [s.sha for s in scripts]
+        # we can't use the normal script_* methods because they would just
+        # get buffered in the pipeline.
+        exists = immediate('SCRIPT EXISTS', *shas)
+        if not all(exists):
+            for s, exist in izip(scripts, exists):
+                if not exist:
+                    s.sha = immediate('SCRIPT LOAD', s.script)
+
+    def execute(self, raise_on_error=True):
+        "Execute all the commands in the current pipeline"
+        stack = self.command_stack
+        if not stack and not self.watching:
+            return []
+        if self.scripts:
+            self.load_scripts()
+        if self.transaction or self.explicit_transaction:
+            execute = self._execute_transaction
+        else:
+            execute = self._execute_pipeline
+
+        conn = self.connection
+        if not conn:
+            conn = self.connection_pool.get_connection('MULTI',
+                                                       self.shard_hint)
+            # assign to self.connection so reset() releases the connection
+            # back to the pool after we're done
+            self.connection = conn
+
+        try:
+            return execute(conn, stack, raise_on_error)
+        except (ConnectionError, TimeoutError) as e:
+            conn.disconnect()
+            # if we were watching a variable, the watch is no longer valid
+            # since this connection has died. raise a WatchError, which
+            # indicates the user should retry this transaction.
+            if self.watching:
+                raise WatchError("A ConnectionError occurred while "
+                                 "watching one or more keys")
+            # if retry_on_timeout is not set, or the error is not
+            # a TimeoutError, raise it
+            if not (conn.retry_on_timeout and isinstance(e, TimeoutError)):
+                raise
+            # retry a TimeoutError when retry_on_timeout is set
+            return execute(conn, stack, raise_on_error)
+        finally:
+            self.reset()
+
+    def watch(self, *names):
+        "Watches the values at keys ``names``"
+        if self.explicit_transaction:
+            raise RedisError('Cannot issue a WATCH after a MULTI')
+        return self.execute_command('WATCH', *names)
+
+    def unwatch(self):
+        "Unwatches all previously specified keys"
+        return self.watching and self.execute_command('UNWATCH') or True
+
+
+class Script(object):
+    "An executable Lua script object returned by ``register_script``"
+
+    def __init__(self, registered_client, script):
+        self.registered_client = registered_client
+        self.script = script
+        # Precalculate and store the SHA1 hex digest of the script.
+
+        if isinstance(script, basestring):
+            # We need the encoding from the client in order to generate an
+            # accurate byte representation of the script
+            encoder = registered_client.connection_pool.get_encoder()
+            script = encoder.encode(script)
+        self.sha = hashlib.sha1(script).hexdigest()
+
+    def __call__(self, keys=[], args=[], client=None):
+        "Execute the script, passing any required ``args``"
+        if client is None:
+            client = self.registered_client
+        args = tuple(keys) + tuple(args)
+        # make sure the Redis server knows about the script
+        if isinstance(client, Pipeline):
+            # Make sure the pipeline can register the script before executing.
+            client.scripts.add(self)
+        try:
+            return client.evalsha(self.sha, len(keys), *args)
+        except NoScriptError:
+            # Maybe the client is pointed to a different server than the client
+            # that created this instance?
+            # Overwrite the sha just in case there was a discrepancy.
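+            # [Editor's note] A hedged usage sketch for this class, obtained
+            # via register_script; assumes a local server and an illustrative
+            # key name 'score'. __call__ above handles EVALSHA plus the
+            # NoScriptError reload:
+            #
+            #     r = redis.Redis()
+            #     multiply = r.register_script(
+            #         "return tonumber(redis.call('GET', KEYS[1]))"
+            #         " * tonumber(ARGV[1])")
+            #     r.set('score', 7)
+            #     multiply(keys=['score'], args=[3])   # -> 21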
+            self.sha = client.script_load(self.script)
+            return client.evalsha(self.sha, len(keys), *args)
+
+
+class BitFieldOperation(object):
+    """
+    Command builder for BITFIELD commands.
+    """
+    def __init__(self, client, key, default_overflow=None):
+        self.client = client
+        self.key = key
+        self._default_overflow = default_overflow
+        self.reset()
+
+    def reset(self):
+        """
+        Reset the state of the instance to when it was constructed
+        """
+        self.operations = []
+        self._last_overflow = 'WRAP'
+        self.overflow(self._default_overflow or self._last_overflow)
+
+    def overflow(self, overflow):
+        """
+        Update the overflow algorithm of successive INCRBY operations
+        :param overflow: Overflow algorithm, one of WRAP, SAT, FAIL. See the
+            Redis docs for descriptions of these algorithms.
+        :returns: a :py:class:`BitFieldOperation` instance.
+        """
+        overflow = overflow.upper()
+        if overflow != self._last_overflow:
+            self._last_overflow = overflow
+            self.operations.append(('OVERFLOW', overflow))
+        return self
+
+    def incrby(self, fmt, offset, increment, overflow=None):
+        """
+        Increment a bitfield by a given amount.
+        :param fmt: format-string for the bitfield being updated, e.g. 'u8'
+            for an unsigned 8-bit integer.
+        :param offset: offset (in number of bits). If prefixed with a
+            '#', this is an offset multiplier, e.g. given the arguments
+            fmt='u8', offset='#2', the offset will be 16.
+        :param int increment: value to increment the bitfield by.
+        :param str overflow: overflow algorithm. Defaults to WRAP, but other
+            acceptable values are SAT and FAIL. See the Redis docs for
+            descriptions of these algorithms.
+        :returns: a :py:class:`BitFieldOperation` instance.
+        """
+        if overflow is not None:
+            self.overflow(overflow)
+
+        self.operations.append(('INCRBY', fmt, offset, increment))
+        return self
+
+    def get(self, fmt, offset):
+        """
+        Get the value of a given bitfield.
+        :param fmt: format-string for the bitfield being read, e.g. 'u8' for
+            an unsigned 8-bit integer.
+        :param offset: offset (in number of bits). If prefixed with a
+            '#', this is an offset multiplier, e.g. given the arguments
+            fmt='u8', offset='#2', the offset will be 16.
+        :returns: a :py:class:`BitFieldOperation` instance.
+        """
+        self.operations.append(('GET', fmt, offset))
+        return self
+
+    def set(self, fmt, offset, value):
+        """
+        Set the value of a given bitfield.
+        :param fmt: format-string for the bitfield being read, e.g. 'u8' for
+            an unsigned 8-bit integer.
+        :param offset: offset (in number of bits). If prefixed with a
+            '#', this is an offset multiplier, e.g. given the arguments
+            fmt='u8', offset='#2', the offset will be 16.
+        :param int value: value to set at the given position.
+        :returns: a :py:class:`BitFieldOperation` instance.
+        """
+        self.operations.append(('SET', fmt, offset, value))
+        return self
+
+    @property
+    def command(self):
+        cmd = ['BITFIELD', self.key]
+        for ops in self.operations:
+            cmd.extend(ops)
+        return cmd
+
+    def execute(self):
+        """
+        Execute the operation(s) in a single BITFIELD command. The return value
+        is a list of values corresponding to each operation. If the client
+        used to create this instance was a pipeline, the list of values
+        will be returned by the pipeline's execute() call.
+ """ + command = self.command + self.reset() + return self.client.execute_command(*command) diff --git a/infrastructure/lambda/task_queue_manager/redis/connection.py b/infrastructure/lambda/task_queue_manager/redis/connection.py new file mode 100644 index 0000000..9781b8c --- /dev/null +++ b/infrastructure/lambda/task_queue_manager/redis/connection.py @@ -0,0 +1,1384 @@ +from __future__ import unicode_literals +from distutils.version import StrictVersion +from itertools import chain +from time import time +import errno +import io +import os +import socket +import threading +import warnings + +from redis._compat import (xrange, imap, unicode, long, + nativestr, basestring, iteritems, + LifoQueue, Empty, Full, urlparse, parse_qs, + recv, recv_into, unquote, BlockingIOError, + sendall, shutdown, ssl_wrap_socket) +from redis.exceptions import ( + AuthenticationError, + AuthenticationWrongNumberOfArgsError, + BusyLoadingError, + ChildDeadlockedError, + ConnectionError, + DataError, + ExecAbortError, + InvalidResponse, + NoPermissionError, + NoScriptError, + ReadOnlyError, + RedisError, + ResponseError, + TimeoutError, +) +from redis.utils import HIREDIS_AVAILABLE + +try: + import ssl + ssl_available = True +except ImportError: + ssl_available = False + +NONBLOCKING_EXCEPTION_ERROR_NUMBERS = { + BlockingIOError: errno.EWOULDBLOCK, +} + +if ssl_available: + if hasattr(ssl, 'SSLWantReadError'): + NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantReadError] = 2 + NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantWriteError] = 2 + else: + NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLError] = 2 + +# In Python 2.7 a socket.error is raised for a nonblocking read. +# The _compat module aliases BlockingIOError to socket.error to be +# Python 2/3 compatible. +# However this means that all socket.error exceptions need to be handled +# properly within these exception handlers. +# We need to make sure socket.error is included in these handlers and +# provide a dummy error number that will never match a real exception. +if socket.error not in NONBLOCKING_EXCEPTION_ERROR_NUMBERS: + NONBLOCKING_EXCEPTION_ERROR_NUMBERS[socket.error] = -999999 + +NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys()) + +if HIREDIS_AVAILABLE: + import hiredis + + hiredis_version = StrictVersion(hiredis.__version__) + HIREDIS_SUPPORTS_CALLABLE_ERRORS = \ + hiredis_version >= StrictVersion('0.1.3') + HIREDIS_SUPPORTS_BYTE_BUFFER = \ + hiredis_version >= StrictVersion('0.1.4') + HIREDIS_SUPPORTS_ENCODING_ERRORS = \ + hiredis_version >= StrictVersion('1.0.0') + + if not HIREDIS_SUPPORTS_BYTE_BUFFER: + msg = ("redis-py works best with hiredis >= 0.1.4. You're running " + "hiredis %s. Please consider upgrading." % hiredis.__version__) + warnings.warn(msg) + + HIREDIS_USE_BYTE_BUFFER = True + # only use byte buffer if hiredis supports it + if not HIREDIS_SUPPORTS_BYTE_BUFFER: + HIREDIS_USE_BYTE_BUFFER = False + +SYM_STAR = b'*' +SYM_DOLLAR = b'$' +SYM_CRLF = b'\r\n' +SYM_EMPTY = b'' + +SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server." 
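+# [Editor's note] The SYM_* constants above frame commands in the RESP wire
+# protocol, which is what pack_command() further down in this file produces.
+# A minimal illustrative sketch, not part of the library:
+#
+#     def resp_encode(*args):
+#         parts = [SYM_STAR + str(len(args)).encode() + SYM_CRLF]
+#         for arg in (a.encode() if isinstance(a, str) else a for a in args):
+#             parts.append(SYM_DOLLAR + str(len(arg)).encode() + SYM_CRLF)
+#             parts.append(arg + SYM_CRLF)
+#         return b''.join(parts)
+#
+#     resp_encode('SET', 'foo', 'bar')
+#     # -> b'*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n'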
+ +SENTINEL = object() + + +class Encoder(object): + "Encode strings to bytes-like and decode bytes-like to strings" + + def __init__(self, encoding, encoding_errors, decode_responses): + self.encoding = encoding + self.encoding_errors = encoding_errors + self.decode_responses = decode_responses + + def encode(self, value): + "Return a bytestring or bytes-like representation of the value" + if isinstance(value, (bytes, memoryview)): + return value + elif isinstance(value, bool): + # special case bool since it is a subclass of int + raise DataError("Invalid input of type: 'bool'. Convert to a " + "bytes, string, int or float first.") + elif isinstance(value, float): + value = repr(value).encode() + elif isinstance(value, (int, long)): + # python 2 repr() on longs is '123L', so use str() instead + value = str(value).encode() + elif not isinstance(value, basestring): + # a value we don't know how to deal with. throw an error + typename = type(value).__name__ + raise DataError("Invalid input of type: '%s'. Convert to a " + "bytes, string, int or float first." % typename) + if isinstance(value, unicode): + value = value.encode(self.encoding, self.encoding_errors) + return value + + def decode(self, value, force=False): + "Return a unicode string from the bytes-like representation" + if self.decode_responses or force: + if isinstance(value, memoryview): + value = value.tobytes() + if isinstance(value, bytes): + value = value.decode(self.encoding, self.encoding_errors) + return value + + +class BaseParser(object): + EXCEPTION_CLASSES = { + 'ERR': { + 'max number of clients reached': ConnectionError, + 'Client sent AUTH, but no password is set': AuthenticationError, + 'invalid password': AuthenticationError, + # some Redis server versions report invalid command syntax + # in lowercase + 'wrong number of arguments for \'auth\' command': + AuthenticationWrongNumberOfArgsError, + # some Redis server versions report invalid command syntax + # in uppercase + 'wrong number of arguments for \'AUTH\' command': + AuthenticationWrongNumberOfArgsError, + }, + 'EXECABORT': ExecAbortError, + 'LOADING': BusyLoadingError, + 'NOSCRIPT': NoScriptError, + 'READONLY': ReadOnlyError, + 'NOAUTH': AuthenticationError, + 'NOPERM': NoPermissionError, + } + + def parse_error(self, response): + "Parse an error response" + error_code = response.split(' ')[0] + if error_code in self.EXCEPTION_CLASSES: + response = response[len(error_code) + 1:] + exception_class = self.EXCEPTION_CLASSES[error_code] + if isinstance(exception_class, dict): + exception_class = exception_class.get(response, ResponseError) + return exception_class(response) + return ResponseError(response) + + +class SocketBuffer(object): + def __init__(self, socket, socket_read_size, socket_timeout): + self._sock = socket + self.socket_read_size = socket_read_size + self.socket_timeout = socket_timeout + self._buffer = io.BytesIO() + # number of bytes written to the buffer from the socket + self.bytes_written = 0 + # number of bytes read from the buffer + self.bytes_read = 0 + + @property + def length(self): + return self.bytes_written - self.bytes_read + + def _read_from_socket(self, length=None, timeout=SENTINEL, + raise_on_timeout=True): + sock = self._sock + socket_read_size = self.socket_read_size + buf = self._buffer + buf.seek(self.bytes_written) + marker = 0 + custom_timeout = timeout is not SENTINEL + + try: + if custom_timeout: + sock.settimeout(timeout) + while True: + data = recv(self._sock, socket_read_size) + # an empty string indicates the server 
shutdown the socket + if isinstance(data, bytes) and len(data) == 0: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + buf.write(data) + data_length = len(data) + self.bytes_written += data_length + marker += data_length + + if length is not None and length > marker: + continue + return True + except socket.timeout: + if raise_on_timeout: + raise TimeoutError("Timeout reading from socket") + return False + except NONBLOCKING_EXCEPTIONS as ex: + # if we're in nonblocking mode and the recv raises a + # blocking error, simply return False indicating that + # there's no data to be read. otherwise raise the + # original exception. + allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1) + if not raise_on_timeout and ex.errno == allowed: + return False + raise ConnectionError("Error while reading from socket: %s" % + (ex.args,)) + finally: + if custom_timeout: + sock.settimeout(self.socket_timeout) + + def can_read(self, timeout): + return bool(self.length) or \ + self._read_from_socket(timeout=timeout, + raise_on_timeout=False) + + def read(self, length): + length = length + 2 # make sure to read the \r\n terminator + # make sure we've read enough data from the socket + if length > self.length: + self._read_from_socket(length - self.length) + + self._buffer.seek(self.bytes_read) + data = self._buffer.read(length) + self.bytes_read += len(data) + + # purge the buffer when we've consumed it all so it doesn't + # grow forever + if self.bytes_read == self.bytes_written: + self.purge() + + return data[:-2] + + def readline(self): + buf = self._buffer + buf.seek(self.bytes_read) + data = buf.readline() + while not data.endswith(SYM_CRLF): + # there's more data in the socket that we need + self._read_from_socket() + buf.seek(self.bytes_read) + data = buf.readline() + + self.bytes_read += len(data) + + # purge the buffer when we've consumed it all so it doesn't + # grow forever + if self.bytes_read == self.bytes_written: + self.purge() + + return data[:-2] + + def purge(self): + self._buffer.seek(0) + self._buffer.truncate() + self.bytes_written = 0 + self.bytes_read = 0 + + def close(self): + try: + self.purge() + self._buffer.close() + except Exception: + # issue #633 suggests the purge/close somehow raised a + # BadFileDescriptor error. Perhaps the client ran out of + # memory or something else? It's probably OK to ignore + # any error being raised from purge/close since we're + # removing the reference to the instance below. 
+ pass + self._buffer = None + self._sock = None + + +class PythonParser(BaseParser): + "Plain Python parsing class" + def __init__(self, socket_read_size): + self.socket_read_size = socket_read_size + self.encoder = None + self._sock = None + self._buffer = None + + def __del__(self): + self.on_disconnect() + + def on_connect(self, connection): + "Called when the socket connects" + self._sock = connection._sock + self._buffer = SocketBuffer(self._sock, + self.socket_read_size, + connection.socket_timeout) + self.encoder = connection.encoder + + def on_disconnect(self): + "Called when the socket disconnects" + self._sock = None + if self._buffer is not None: + self._buffer.close() + self._buffer = None + self.encoder = None + + def can_read(self, timeout): + return self._buffer and self._buffer.can_read(timeout) + + def read_response(self): + raw = self._buffer.readline() + if not raw: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + + byte, response = raw[:1], raw[1:] + + if byte not in (b'-', b'+', b':', b'$', b'*'): + raise InvalidResponse("Protocol Error: %r" % raw) + + # server returned an error + if byte == b'-': + response = nativestr(response) + error = self.parse_error(response) + # if the error is a ConnectionError, raise immediately so the user + # is notified + if isinstance(error, ConnectionError): + raise error + # otherwise, we're dealing with a ResponseError that might belong + # inside a pipeline response. the connection's read_response() + # and/or the pipeline's execute() will raise this error if + # necessary, so just return the exception instance here. + return error + # single value + elif byte == b'+': + pass + # int value + elif byte == b':': + response = long(response) + # bulk response + elif byte == b'$': + length = int(response) + if length == -1: + return None + response = self._buffer.read(length) + # multi-bulk response + elif byte == b'*': + length = int(response) + if length == -1: + return None + response = [self.read_response() for i in xrange(length)] + if isinstance(response, bytes): + response = self.encoder.decode(response) + return response + + +class HiredisParser(BaseParser): + "Parser class for connections using Hiredis" + def __init__(self, socket_read_size): + if not HIREDIS_AVAILABLE: + raise RedisError("Hiredis is not installed") + self.socket_read_size = socket_read_size + + if HIREDIS_USE_BYTE_BUFFER: + self._buffer = bytearray(socket_read_size) + + def __del__(self): + self.on_disconnect() + + def on_connect(self, connection): + self._sock = connection._sock + self._socket_timeout = connection.socket_timeout + kwargs = { + 'protocolError': InvalidResponse, + 'replyError': self.parse_error, + } + + # hiredis < 0.1.3 doesn't support functions that create exceptions + if not HIREDIS_SUPPORTS_CALLABLE_ERRORS: + kwargs['replyError'] = ResponseError + + if connection.encoder.decode_responses: + kwargs['encoding'] = connection.encoder.encoding + if HIREDIS_SUPPORTS_ENCODING_ERRORS: + kwargs['errors'] = connection.encoder.encoding_errors + self._reader = hiredis.Reader(**kwargs) + self._next_response = False + + def on_disconnect(self): + self._sock = None + self._reader = None + self._next_response = False + + def can_read(self, timeout): + if not self._reader: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + + if self._next_response is False: + self._next_response = self._reader.gets() + if self._next_response is False: + return self.read_from_socket(timeout=timeout, + raise_on_timeout=False) + return True + + def 
read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True): + sock = self._sock + custom_timeout = timeout is not SENTINEL + try: + if custom_timeout: + sock.settimeout(timeout) + if HIREDIS_USE_BYTE_BUFFER: + bufflen = recv_into(self._sock, self._buffer) + if bufflen == 0: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + self._reader.feed(self._buffer, 0, bufflen) + else: + buffer = recv(self._sock, self.socket_read_size) + # an empty string indicates the server shutdown the socket + if not isinstance(buffer, bytes) or len(buffer) == 0: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + self._reader.feed(buffer) + # data was read from the socket and added to the buffer. + # return True to indicate that data was read. + return True + except socket.timeout: + if raise_on_timeout: + raise TimeoutError("Timeout reading from socket") + return False + except NONBLOCKING_EXCEPTIONS as ex: + # if we're in nonblocking mode and the recv raises a + # blocking error, simply return False indicating that + # there's no data to be read. otherwise raise the + # original exception. + allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1) + if not raise_on_timeout and ex.errno == allowed: + return False + raise ConnectionError("Error while reading from socket: %s" % + (ex.args,)) + finally: + if custom_timeout: + sock.settimeout(self._socket_timeout) + + def read_response(self): + if not self._reader: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + + # _next_response might be cached from a can_read() call + if self._next_response is not False: + response = self._next_response + self._next_response = False + return response + + response = self._reader.gets() + while response is False: + self.read_from_socket() + response = self._reader.gets() + # if an older version of hiredis is installed, we need to attempt + # to convert ResponseErrors to their appropriate types. 
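+        # [Editor's note] For reference, the hiredis Reader protocol used in
+        # this loop: feed() buffers raw bytes and gets() returns False until
+        # a complete reply has accumulated -- e.g.:
+        #
+        #     reader = hiredis.Reader()
+        #     reader.feed(b'$5\r\nhel')       # partial bulk string
+        #     reader.gets()                   # False: incomplete
+        #     reader.feed(b'lo\r\n')
+        #     reader.gets()                   # b'hello'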
+ if not HIREDIS_SUPPORTS_CALLABLE_ERRORS: + if isinstance(response, ResponseError): + response = self.parse_error(response.args[0]) + elif isinstance(response, list) and response and \ + isinstance(response[0], ResponseError): + response[0] = self.parse_error(response[0].args[0]) + # if the response is a ConnectionError or the response is a list and + # the first item is a ConnectionError, raise it as something bad + # happened + if isinstance(response, ConnectionError): + raise response + elif isinstance(response, list) and response and \ + isinstance(response[0], ConnectionError): + raise response[0] + return response + + +if HIREDIS_AVAILABLE: + DefaultParser = HiredisParser +else: + DefaultParser = PythonParser + + +class Connection(object): + "Manages TCP communication to and from a Redis server" + + def __init__(self, host='localhost', port=6379, db=0, password=None, + socket_timeout=None, socket_connect_timeout=None, + socket_keepalive=False, socket_keepalive_options=None, + socket_type=0, retry_on_timeout=False, encoding='utf-8', + encoding_errors='strict', decode_responses=False, + parser_class=DefaultParser, socket_read_size=65536, + health_check_interval=0, client_name=None, username=None): + self.pid = os.getpid() + self.host = host + self.port = int(port) + self.db = db + self.username = username + self.client_name = client_name + self.password = password + self.socket_timeout = socket_timeout + self.socket_connect_timeout = socket_connect_timeout or socket_timeout + self.socket_keepalive = socket_keepalive + self.socket_keepalive_options = socket_keepalive_options or {} + self.socket_type = socket_type + self.retry_on_timeout = retry_on_timeout + self.health_check_interval = health_check_interval + self.next_health_check = 0 + self.encoder = Encoder(encoding, encoding_errors, decode_responses) + self._sock = None + self._parser = parser_class(socket_read_size=socket_read_size) + self._connect_callbacks = [] + self._buffer_cutoff = 6000 + + def __repr__(self): + repr_args = ','.join(['%s=%s' % (k, v) for k, v in self.repr_pieces()]) + return '%s<%s>' % (self.__class__.__name__, repr_args) + + def repr_pieces(self): + pieces = [ + ('host', self.host), + ('port', self.port), + ('db', self.db) + ] + if self.client_name: + pieces.append(('client_name', self.client_name)) + return pieces + + def __del__(self): + self.disconnect() + + def register_connect_callback(self, callback): + self._connect_callbacks.append(callback) + + def clear_connect_callbacks(self): + self._connect_callbacks = [] + + def connect(self): + "Connects to the Redis server if not already connected" + if self._sock: + return + try: + sock = self._connect() + except socket.timeout: + raise TimeoutError("Timeout connecting to server") + except socket.error as e: + raise ConnectionError(self._error_message(e)) + + self._sock = sock + try: + self.on_connect() + except RedisError: + # clean up after any error in on_connect + self.disconnect() + raise + + # run any user callbacks. 
right now the only internal callback + # is for pubsub channel/pattern resubscription + for callback in self._connect_callbacks: + callback(self) + + def _connect(self): + "Create a TCP socket connection" + # we want to mimic what socket.create_connection does to support + # ipv4/ipv6, but we want to set options prior to calling + # socket.connect() + err = None + for res in socket.getaddrinfo(self.host, self.port, self.socket_type, + socket.SOCK_STREAM): + family, socktype, proto, canonname, socket_address = res + sock = None + try: + sock = socket.socket(family, socktype, proto) + # TCP_NODELAY + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + + # TCP_KEEPALIVE + if self.socket_keepalive: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + for k, v in iteritems(self.socket_keepalive_options): + sock.setsockopt(socket.IPPROTO_TCP, k, v) + + # set the socket_connect_timeout before we connect + sock.settimeout(self.socket_connect_timeout) + + # connect + sock.connect(socket_address) + + # set the socket_timeout now that we're connected + sock.settimeout(self.socket_timeout) + return sock + + except socket.error as _: + err = _ + if sock is not None: + sock.close() + + if err is not None: + raise err + raise socket.error("socket.getaddrinfo returned an empty list") + + def _error_message(self, exception): + # args for socket.error can either be (errno, "message") + # or just "message" + if len(exception.args) == 1: + return "Error connecting to %s:%s. %s." % \ + (self.host, self.port, exception.args[0]) + else: + return "Error %s connecting to %s:%s. %s." % \ + (exception.args[0], self.host, self.port, exception.args[1]) + + def on_connect(self): + "Initialize the connection, authenticate and select a database" + self._parser.on_connect(self) + + # if username and/or password are set, authenticate + if self.username or self.password: + if self.username: + auth_args = (self.username, self.password or '') + else: + auth_args = (self.password,) + # avoid checking health here -- PING will fail if we try + # to check the health prior to the AUTH + self.send_command('AUTH', *auth_args, check_health=False) + + try: + auth_response = self.read_response() + except AuthenticationWrongNumberOfArgsError: + # a username and password were specified but the Redis + # server seems to be < 6.0.0 which expects a single password + # arg. retry auth with just the password. 
+ # https://github.com/andymccurdy/redis-py/issues/1274 + self.send_command('AUTH', self.password, check_health=False) + auth_response = self.read_response() + + if nativestr(auth_response) != 'OK': + raise AuthenticationError('Invalid Username or Password') + + # if a client_name is given, set it + if self.client_name: + self.send_command('CLIENT', 'SETNAME', self.client_name) + if nativestr(self.read_response()) != 'OK': + raise ConnectionError('Error setting client name') + + # if a database is specified, switch to it + if self.db: + self.send_command('SELECT', self.db) + if nativestr(self.read_response()) != 'OK': + raise ConnectionError('Invalid Database') + + def disconnect(self): + "Disconnects from the Redis server" + self._parser.on_disconnect() + if self._sock is None: + return + try: + if os.getpid() == self.pid: + shutdown(self._sock, socket.SHUT_RDWR) + self._sock.close() + except socket.error: + pass + self._sock = None + + def check_health(self): + "Check the health of the connection with a PING/PONG" + if self.health_check_interval and time() > self.next_health_check: + try: + self.send_command('PING', check_health=False) + if nativestr(self.read_response()) != 'PONG': + raise ConnectionError( + 'Bad response from PING health check') + except (ConnectionError, TimeoutError): + self.disconnect() + self.send_command('PING', check_health=False) + if nativestr(self.read_response()) != 'PONG': + raise ConnectionError( + 'Bad response from PING health check') + + def send_packed_command(self, command, check_health=True): + "Send an already packed command to the Redis server" + if not self._sock: + self.connect() + # guard against health check recursion + if check_health: + self.check_health() + try: + if isinstance(command, str): + command = [command] + for item in command: + sendall(self._sock, item) + except socket.timeout: + self.disconnect() + raise TimeoutError("Timeout writing to socket") + except socket.error as e: + self.disconnect() + if len(e.args) == 1: + errno, errmsg = 'UNKNOWN', e.args[0] + else: + errno = e.args[0] + errmsg = e.args[1] + raise ConnectionError("Error %s while writing to socket. %s." % + (errno, errmsg)) + except BaseException: + self.disconnect() + raise + + def send_command(self, *args, **kwargs): + "Pack and send a command to the Redis server" + self.send_packed_command(self.pack_command(*args), + check_health=kwargs.get('check_health', True)) + + def can_read(self, timeout=0): + "Poll the socket to see if there's data that can be read." + sock = self._sock + if not sock: + self.connect() + sock = self._sock + return self._parser.can_read(timeout) + + def read_response(self): + "Read the response from a previously sent command" + try: + response = self._parser.read_response() + except socket.timeout: + self.disconnect() + raise TimeoutError("Timeout reading from %s:%s" % + (self.host, self.port)) + except socket.error as e: + self.disconnect() + raise ConnectionError("Error while reading from %s:%s : %s" % + (self.host, self.port, e.args)) + except BaseException: + self.disconnect() + raise + + if self.health_check_interval: + self.next_health_check = time() + self.health_check_interval + + if isinstance(response, ResponseError): + raise response + return response + + def pack_command(self, *args): + "Pack a series of arguments into the Redis protocol" + output = [] + # the client might have included 1 or more literal arguments in + # the command name, e.g., 'CONFIG GET'. 
The Redis server expects these + # arguments to be sent separately, so split the first argument + # manually. These arguments should be bytestrings so that they are + # not encoded. + if isinstance(args[0], unicode): + args = tuple(args[0].encode().split()) + args[1:] + elif b' ' in args[0]: + args = tuple(args[0].split()) + args[1:] + + buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF)) + + buffer_cutoff = self._buffer_cutoff + for arg in imap(self.encoder.encode, args): + # to avoid large string mallocs, chunk the command into the + # output list if we're sending large values or memoryviews + arg_length = len(arg) + if (len(buff) > buffer_cutoff or arg_length > buffer_cutoff + or isinstance(arg, memoryview)): + buff = SYM_EMPTY.join( + (buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF)) + output.append(buff) + output.append(arg) + buff = SYM_CRLF + else: + buff = SYM_EMPTY.join( + (buff, SYM_DOLLAR, str(arg_length).encode(), + SYM_CRLF, arg, SYM_CRLF)) + output.append(buff) + return output + + def pack_commands(self, commands): + "Pack multiple commands into the Redis protocol" + output = [] + pieces = [] + buffer_length = 0 + buffer_cutoff = self._buffer_cutoff + + for cmd in commands: + for chunk in self.pack_command(*cmd): + chunklen = len(chunk) + if (buffer_length > buffer_cutoff or chunklen > buffer_cutoff + or isinstance(chunk, memoryview)): + output.append(SYM_EMPTY.join(pieces)) + buffer_length = 0 + pieces = [] + + if chunklen > buffer_cutoff or isinstance(chunk, memoryview): + output.append(chunk) + else: + pieces.append(chunk) + buffer_length += chunklen + + if pieces: + output.append(SYM_EMPTY.join(pieces)) + return output + + +class SSLConnection(Connection): + + def __init__(self, ssl_keyfile=None, ssl_certfile=None, + ssl_cert_reqs='required', ssl_ca_certs=None, + ssl_check_hostname=False, **kwargs): + if not ssl_available: + raise RedisError("Python wasn't built with SSL support") + + super(SSLConnection, self).__init__(**kwargs) + + self.keyfile = ssl_keyfile + self.certfile = ssl_certfile + if ssl_cert_reqs is None: + ssl_cert_reqs = ssl.CERT_NONE + elif isinstance(ssl_cert_reqs, basestring): + CERT_REQS = { + 'none': ssl.CERT_NONE, + 'optional': ssl.CERT_OPTIONAL, + 'required': ssl.CERT_REQUIRED + } + if ssl_cert_reqs not in CERT_REQS: + raise RedisError( + "Invalid SSL Certificate Requirements Flag: %s" % + ssl_cert_reqs) + ssl_cert_reqs = CERT_REQS[ssl_cert_reqs] + self.cert_reqs = ssl_cert_reqs + self.ca_certs = ssl_ca_certs + self.check_hostname = ssl_check_hostname + + def _connect(self): + "Wrap the socket with SSL support" + sock = super(SSLConnection, self)._connect() + if hasattr(ssl, "create_default_context"): + context = ssl.create_default_context() + context.check_hostname = self.check_hostname + context.verify_mode = self.cert_reqs + if self.certfile and self.keyfile: + context.load_cert_chain(certfile=self.certfile, + keyfile=self.keyfile) + if self.ca_certs: + context.load_verify_locations(self.ca_certs) + sock = ssl_wrap_socket(context, sock, server_hostname=self.host) + else: + # In case this code runs in a version which is older than 2.7.9, + # we want to fall back to old code + sock = ssl_wrap_socket(ssl, + sock, + cert_reqs=self.cert_reqs, + keyfile=self.keyfile, + certfile=self.certfile, + ca_certs=self.ca_certs) + return sock + + +class UnixDomainSocketConnection(Connection): + + def __init__(self, path='', db=0, username=None, password=None, + socket_timeout=None, encoding='utf-8', + encoding_errors='strict', 
decode_responses=False,
+                 retry_on_timeout=False,
+                 parser_class=DefaultParser, socket_read_size=65536,
+                 health_check_interval=0, client_name=None):
+        self.pid = os.getpid()
+        self.path = path
+        self.db = db
+        self.username = username
+        self.client_name = client_name
+        self.password = password
+        self.socket_timeout = socket_timeout
+        self.retry_on_timeout = retry_on_timeout
+        self.health_check_interval = health_check_interval
+        self.next_health_check = 0
+        self.encoder = Encoder(encoding, encoding_errors, decode_responses)
+        self._sock = None
+        self._parser = parser_class(socket_read_size=socket_read_size)
+        self._connect_callbacks = []
+        self._buffer_cutoff = 6000
+
+    def repr_pieces(self):
+        pieces = [
+            ('path', self.path),
+            ('db', self.db),
+        ]
+        if self.client_name:
+            pieces.append(('client_name', self.client_name))
+        return pieces
+
+    def _connect(self):
+        "Create a Unix domain socket connection"
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        sock.settimeout(self.socket_timeout)
+        sock.connect(self.path)
+        return sock
+
+    def _error_message(self, exception):
+        # args for socket.error can either be (errno, "message")
+        # or just "message"
+        if len(exception.args) == 1:
+            return "Error connecting to unix socket: %s. %s." % \
+                (self.path, exception.args[0])
+        else:
+            return "Error %s connecting to unix socket: %s. %s." % \
+                (exception.args[0], self.path, exception.args[1])
+
+
+FALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO')
+
+
+def to_bool(value):
+    if value is None or value == '':
+        return None
+    if isinstance(value, basestring) and value.upper() in FALSE_STRINGS:
+        return False
+    return bool(value)
+
+
+URL_QUERY_ARGUMENT_PARSERS = {
+    'socket_timeout': float,
+    'socket_connect_timeout': float,
+    'socket_keepalive': to_bool,
+    'retry_on_timeout': to_bool,
+    'max_connections': int,
+    'health_check_interval': int,
+    'ssl_check_hostname': to_bool,
+}
+
+
+class ConnectionPool(object):
+    "Generic connection pool"
+    @classmethod
+    def from_url(cls, url, db=None, decode_components=False, **kwargs):
+        """
+        Return a connection pool configured from the given URL.
+
+        For example::
+
+            redis://[[username]:[password]]@localhost:6379/0
+            rediss://[[username]:[password]]@localhost:6379/0
+            unix://[[username]:[password]]@/path/to/socket.sock?db=0
+
+        Three URL schemes are supported:
+
+        - ```redis://``
+          <https://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
+          normal TCP socket connection
+        - ```rediss://``
+          <https://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates
+          an SSL wrapped TCP socket connection
+        - ``unix://`` creates a Unix Domain Socket connection
+
+        There are several ways to specify a database number. The parse function
+        will return the first specified option:
+            1. A ``db`` querystring option, e.g. redis://localhost?db=0
+            2. If using the redis:// scheme, the path argument of the url, e.g.
+               redis://localhost/0
+            3. The ``db`` argument to this function.
+
+        If none of these options are specified, db=0 is used.
+
+        The ``decode_components`` argument allows this function to work with
+        percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
+        escapes will be replaced by their single-character equivalents after
+        the URL has been parsed. This only applies to the ``hostname``,
+        ``path``, ``username`` and ``password`` components.
+
+        Any additional querystring arguments and keyword arguments will be
+        passed along to the ConnectionPool class's initializer. The querystring
+        arguments ``socket_connect_timeout`` and ``socket_timeout`` if supplied
+        are parsed as float values. The arguments ``socket_keepalive`` and
+        ``retry_on_timeout`` are parsed to boolean values that accept
+        True/False, Yes/No values to indicate state. Invalid types cause a
+        ``UserWarning`` to be raised. In the case of conflicting arguments,
+        querystring arguments always win.
+
+        """
+        url = urlparse(url)
+        url_options = {}
+
+        for name, value in iteritems(parse_qs(url.query)):
+            if value and len(value) > 0:
+                parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
+                if parser:
+                    try:
+                        url_options[name] = parser(value[0])
+                    except (TypeError, ValueError):
+                        warnings.warn(UserWarning(
+                            "Invalid value for `%s` in connection URL." % name
+                        ))
+                else:
+                    url_options[name] = value[0]
+
+        if decode_components:
+            username = unquote(url.username) if url.username else None
+            password = unquote(url.password) if url.password else None
+            path = unquote(url.path) if url.path else None
+            hostname = unquote(url.hostname) if url.hostname else None
+        else:
+            username = url.username or None
+            password = url.password or None
+            path = url.path
+            hostname = url.hostname
+
+        # We only support redis://, rediss:// and unix:// schemes.
+        if url.scheme == 'unix':
+            url_options.update({
+                'username': username,
+                'password': password,
+                'path': path,
+                'connection_class': UnixDomainSocketConnection,
+            })
+
+        elif url.scheme in ('redis', 'rediss'):
+            url_options.update({
+                'host': hostname,
+                'port': int(url.port or 6379),
+                'username': username,
+                'password': password,
+            })
+
+            # If there's a path argument, use it as the db argument if a
+            # querystring value wasn't specified
+            if 'db' not in url_options and path:
+                try:
+                    url_options['db'] = int(path.replace('/', ''))
+                except (AttributeError, ValueError):
+                    pass
+
+            if url.scheme == 'rediss':
+                url_options['connection_class'] = SSLConnection
+        else:
+            valid_schemes = ', '.join(('redis://', 'rediss://', 'unix://'))
+            raise ValueError('Redis URL must specify one of the following '
+                             'schemes (%s)' % valid_schemes)
+
+        # last shot at the db value
+        url_options['db'] = int(url_options.get('db', db or 0))
+
+        # update the arguments from the URL values
+        kwargs.update(url_options)
+
+        # backwards compatibility
+        if 'charset' in kwargs:
+            warnings.warn(DeprecationWarning(
+                '"charset" is deprecated. Use "encoding" instead'))
+            kwargs['encoding'] = kwargs.pop('charset')
+        if 'errors' in kwargs:
+            warnings.warn(DeprecationWarning(
+                '"errors" is deprecated. Use "encoding_errors" instead'))
+            kwargs['encoding_errors'] = kwargs.pop('errors')
+
+        return cls(**kwargs)
+
+    def __init__(self, connection_class=Connection, max_connections=None,
+                 **connection_kwargs):
+        """
+        Create a connection pool. If max_connections is set, then this
+        object raises redis.ConnectionError when the pool's limit is reached.
+
+        By default, TCP connections are created unless connection_class is
+        specified. Use redis.UnixDomainSocketConnection for unix sockets.
+
+        Any additional keyword arguments are passed to the constructor of
+        connection_class.
+        """
+        max_connections = max_connections or 2 ** 31
+        if not isinstance(max_connections, (int, long)) or max_connections < 0:
+            raise ValueError('"max_connections" must be a positive integer')
+
+        self.connection_class = connection_class
+        self.connection_kwargs = connection_kwargs
+        self.max_connections = max_connections
+
+        # a lock to protect the critical section in _checkpid().
+        # this lock is acquired when the process id changes, such as
+        # after a fork. during this time, multiple threads in the child
+        # process could attempt to acquire this lock.
the first thread + # to acquire the lock will reset the data structures and lock + # object of this pool. subsequent threads acquiring this lock + # will notice the first thread already did the work and simply + # release the lock. + self._fork_lock = threading.Lock() + self.reset() + + def __repr__(self): + return "%s<%s>" % ( + type(self).__name__, + repr(self.connection_class(**self.connection_kwargs)), + ) + + def reset(self): + self._lock = threading.Lock() + self._created_connections = 0 + self._available_connections = [] + self._in_use_connections = set() + + # this must be the last operation in this method. while reset() is + # called when holding _fork_lock, other threads in this process + # can call _checkpid() which compares self.pid and os.getpid() without + # holding any lock (for performance reasons). keeping this assignment + # as the last operation ensures that those other threads will also + # notice a pid difference and block waiting for the first thread to + # release _fork_lock. when each of these threads eventually acquire + # _fork_lock, they will notice that another thread already called + # reset() and they will immediately release _fork_lock and continue on. + self.pid = os.getpid() + + def _checkpid(self): + # _checkpid() attempts to keep ConnectionPool fork-safe on modern + # systems. this is called by all ConnectionPool methods that + # manipulate the pool's state such as get_connection() and release(). + # + # _checkpid() determines whether the process has forked by comparing + # the current process id to the process id saved on the ConnectionPool + # instance. if these values are the same, _checkpid() simply returns. + # + # when the process ids differ, _checkpid() assumes that the process + # has forked and that we're now running in the child process. the child + # process cannot use the parent's file descriptors (e.g., sockets). + # therefore, when _checkpid() sees the process id change, it calls + # reset() in order to reinitialize the child's ConnectionPool. this + # will cause the child to make all new connection objects. + # + # _checkpid() is protected by self._fork_lock to ensure that multiple + # threads in the child process do not call reset() multiple times. + # + # there is an extremely small chance this could fail in the following + # scenario: + # 1. process A calls _checkpid() for the first time and acquires + # self._fork_lock. + # 2. while holding self._fork_lock, process A forks (the fork() + # could happen in a different thread owned by process A) + # 3. process B (the forked child process) inherits the + # ConnectionPool's state from the parent. that state includes + # a locked _fork_lock. process B will not be notified when + # process A releases the _fork_lock and will thus never be + # able to acquire the _fork_lock. + # + # to mitigate this possible deadlock, _checkpid() will only wait 5 + # seconds to acquire _fork_lock. if _fork_lock cannot be acquired in + # that time it is assumed that the child is deadlocked and a + # redis.ChildDeadlockedError error is raised. + if self.pid != os.getpid(): + # python 2.7 doesn't support a timeout option to lock.acquire() + # we have to mimic lock timeouts ourselves. 
+ timeout_at = time() + 5 + acquired = False + while time() < timeout_at: + acquired = self._fork_lock.acquire(False) + if acquired: + break + if not acquired: + raise ChildDeadlockedError + # reset() the instance for the new process if another thread + # hasn't already done so + try: + if self.pid != os.getpid(): + self.reset() + finally: + self._fork_lock.release() + + def get_connection(self, command_name, *keys, **options): + "Get a connection from the pool" + self._checkpid() + with self._lock: + try: + connection = self._available_connections.pop() + except IndexError: + connection = self.make_connection() + self._in_use_connections.add(connection) + + try: + # ensure this connection is connected to Redis + connection.connect() + # connections that the pool provides should be ready to send + # a command. if not, the connection was either returned to the + # pool before all data has been read or the socket has been + # closed. either way, reconnect and verify everything is good. + try: + if connection.can_read(): + raise ConnectionError('Connection has data') + except ConnectionError: + connection.disconnect() + connection.connect() + if connection.can_read(): + raise ConnectionError('Connection not ready') + except BaseException: + # release the connection back to the pool so that we don't + # leak it + self.release(connection) + raise + + return connection + + def get_encoder(self): + "Return an encoder based on encoding settings" + kwargs = self.connection_kwargs + return Encoder( + encoding=kwargs.get('encoding', 'utf-8'), + encoding_errors=kwargs.get('encoding_errors', 'strict'), + decode_responses=kwargs.get('decode_responses', False) + ) + + def make_connection(self): + "Create a new connection" + if self._created_connections >= self.max_connections: + raise ConnectionError("Too many connections") + self._created_connections += 1 + return self.connection_class(**self.connection_kwargs) + + def release(self, connection): + "Releases the connection back to the pool" + self._checkpid() + with self._lock: + if connection.pid != self.pid: + return + self._in_use_connections.remove(connection) + self._available_connections.append(connection) + + def disconnect(self): + "Disconnects all connections in the pool" + self._checkpid() + with self._lock: + all_conns = chain(self._available_connections, + self._in_use_connections) + for connection in all_conns: + connection.disconnect() + + +class BlockingConnectionPool(ConnectionPool): + """ + Thread-safe blocking connection pool:: + + >>> from redis.client import Redis + >>> client = Redis(connection_pool=BlockingConnectionPool()) + + It performs the same function as the default + ``:py:class: ~redis.connection.ConnectionPool`` implementation, in that, + it maintains a pool of reusable connections that can be shared by + multiple redis clients (safely across threads if required). + + The difference is that, in the event that a client tries to get a + connection from the pool when all of connections are in use, rather than + raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default + ``:py:class: ~redis.connection.ConnectionPool`` implementation does), it + makes the client wait ("blocks") for a specified number of seconds until + a connection becomes available. 
+
+    Use ``max_connections`` to increase / decrease the pool size::
+
+        >>> pool = BlockingConnectionPool(max_connections=10)
+
+    Use ``timeout`` to tell it either how many seconds to wait for a connection
+    to become available, or to block forever:
+
+        # Block forever.
+        >>> pool = BlockingConnectionPool(timeout=None)
+
+        # Raise a ``ConnectionError`` after five seconds if a connection is
+        # not available.
+        >>> pool = BlockingConnectionPool(timeout=5)
+    """
+    def __init__(self, max_connections=50, timeout=20,
+                 connection_class=Connection, queue_class=LifoQueue,
+                 **connection_kwargs):
+
+        self.queue_class = queue_class
+        self.timeout = timeout
+        super(BlockingConnectionPool, self).__init__(
+            connection_class=connection_class,
+            max_connections=max_connections,
+            **connection_kwargs)
+
+    def reset(self):
+        # Create and fill up a thread safe queue with ``None`` values.
+        self.pool = self.queue_class(self.max_connections)
+        while True:
+            try:
+                self.pool.put_nowait(None)
+            except Full:
+                break
+
+        # Keep a list of actual connection instances so that we can
+        # disconnect them later.
+        self._connections = []
+
+        # this must be the last operation in this method. while reset() is
+        # called when holding _fork_lock, other threads in this process
+        # can call _checkpid() which compares self.pid and os.getpid() without
+        # holding any lock (for performance reasons). keeping this assignment
+        # as the last operation ensures that those other threads will also
+        # notice a pid difference and block waiting for the first thread to
+        # release _fork_lock. when each of these threads eventually acquire
+        # _fork_lock, they will notice that another thread already called
+        # reset() and they will immediately release _fork_lock and continue on.
+        self.pid = os.getpid()
+
+    def make_connection(self):
+        "Make a fresh connection."
+        connection = self.connection_class(**self.connection_kwargs)
+        self._connections.append(connection)
+        return connection
+
+    def get_connection(self, command_name, *keys, **options):
+        """
+        Get a connection, blocking for ``self.timeout`` until a connection
+        is available from the pool.
+
+        If the connection returned is ``None`` then creates a new connection.
+        Because we use a last-in first-out queue, the existing connections
+        (having been returned to the pool after the initial ``None`` values
+        were added) will be returned before ``None`` values. This means we only
+        create new connections when we need to, i.e.: the actual number of
+        connections will only increase in response to demand.
+        """
+        # Make sure we haven't changed process.
+        self._checkpid()
+
+        # Try and get a connection from the pool. If one isn't available within
+        # self.timeout then raise a ``ConnectionError``.
+        connection = None
+        try:
+            connection = self.pool.get(block=True, timeout=self.timeout)
+        except Empty:
+            # Note that this is not caught by the redis client and will be
+            # raised unless handled by application code.
+            raise ConnectionError("No connection available.")
+
+        # If the ``connection`` is actually ``None`` then that's a cue to make
+        # a new connection to add to the pool.
+        if connection is None:
+            connection = self.make_connection()
+
+        try:
+            # ensure this connection is connected to Redis
+            connection.connect()
+            # connections that the pool provides should be ready to send
+            # a command. if not, the connection was either returned to the
+            # pool before all data has been read or the socket has been
+            # closed. either way, reconnect and verify everything is good.
+            try:
+                if connection.can_read():
+                    raise ConnectionError('Connection has data')
+            except ConnectionError:
+                connection.disconnect()
+                connection.connect()
+                if connection.can_read():
+                    raise ConnectionError('Connection not ready')
+        except BaseException:
+            # release the connection back to the pool so that we don't leak it
+            self.release(connection)
+            raise
+
+        return connection
+
+    def release(self, connection):
+        "Releases the connection back to the pool."
+        # Make sure we haven't changed process.
+        self._checkpid()
+        if connection.pid != self.pid:
+            return
+
+        # Put the connection back into the pool.
+        try:
+            self.pool.put_nowait(connection)
+        except Full:
+            # perhaps the pool has been reset() after a fork? regardless,
+            # we don't want this connection
+            pass
+
+    def disconnect(self):
+        "Disconnects all connections in the pool."
+        self._checkpid()
+        for connection in self._connections:
+            connection.disconnect()
diff --git a/infrastructure/lambda/task_queue_manager/redis/exceptions.py b/infrastructure/lambda/task_queue_manager/redis/exceptions.py
new file mode 100644
index 0000000..760af66
--- /dev/null
+++ b/infrastructure/lambda/task_queue_manager/redis/exceptions.py
@@ -0,0 +1,82 @@
+"Core exceptions raised by the Redis client"
+
+
+class RedisError(Exception):
+    pass
+
+
+class ConnectionError(RedisError):
+    pass
+
+
+class TimeoutError(RedisError):
+    pass
+
+
+class AuthenticationError(ConnectionError):
+    pass
+
+
+class BusyLoadingError(ConnectionError):
+    pass
+
+
+class InvalidResponse(RedisError):
+    pass
+
+
+class ResponseError(RedisError):
+    pass
+
+
+class DataError(RedisError):
+    pass
+
+
+class PubSubError(RedisError):
+    pass
+
+
+class WatchError(RedisError):
+    pass
+
+
+class NoScriptError(ResponseError):
+    pass
+
+
+class ExecAbortError(ResponseError):
+    pass
+
+
+class ReadOnlyError(ResponseError):
+    pass
+
+
+class NoPermissionError(ResponseError):
+    pass
+
+
+class LockError(RedisError, ValueError):
+    "Errors acquiring or releasing a lock"
+    # NOTE: For backwards compatibility, this class derives from ValueError.
+    # This was originally chosen to behave like threading.Lock.
+    pass
+
+
+class LockNotOwnedError(LockError):
+    "Error trying to extend or release a lock that is (no longer) owned"
+    pass
+
+
+class ChildDeadlockedError(Exception):
+    "Error indicating that a child process is deadlocked after a fork()"
+    pass
+
+
+class AuthenticationWrongNumberOfArgsError(ResponseError):
+    """
+    An error to indicate that the wrong number of args
+    were sent to the AUTH command
+    """
+    pass
diff --git a/infrastructure/lambda/task_queue_manager/redis/lock.py b/infrastructure/lambda/task_queue_manager/redis/lock.py
new file mode 100644
index 0000000..5c47748
--- /dev/null
+++ b/infrastructure/lambda/task_queue_manager/redis/lock.py
@@ -0,0 +1,293 @@
+import threading
+import time as mod_time
+import uuid
+from redis.exceptions import LockError, LockNotOwnedError
+from redis.utils import dummy
+
+
+class Lock(object):
+    """
+    A shared, distributed Lock. Using Redis for locking allows the Lock
+    to be shared across processes and/or machines.
+
+    It's left to the user to resolve deadlock issues and make sure
+    multiple clients play nicely together.
diff --git a/infrastructure/lambda/task_queue_manager/redis/lock.py b/infrastructure/lambda/task_queue_manager/redis/lock.py
new file mode 100644
index 0000000..5c47748
--- /dev/null
+++ b/infrastructure/lambda/task_queue_manager/redis/lock.py
@@ -0,0 +1,293 @@
+import threading
+import time as mod_time
+import uuid
+from redis.exceptions import LockError, LockNotOwnedError
+from redis.utils import dummy
+
+
+class Lock(object):
+    """
+    A shared, distributed Lock. Using Redis for locking allows the Lock
+    to be shared across processes and/or machines.
+
+    It's left to the user to resolve deadlock issues and make sure
+    multiple clients play nicely together.
+    """
+
+    lua_release = None
+    lua_extend = None
+    lua_reacquire = None
+
+    # KEYS[1] - lock name
+    # ARGV[1] - token
+    # return 1 if the lock was released, otherwise 0
+    LUA_RELEASE_SCRIPT = """
+        local token = redis.call('get', KEYS[1])
+        if not token or token ~= ARGV[1] then
+            return 0
+        end
+        redis.call('del', KEYS[1])
+        return 1
+    """
+
+    # KEYS[1] - lock name
+    # ARGV[1] - token
+    # ARGV[2] - additional milliseconds
+    # ARGV[3] - "0" if the additional time should be added to the lock's
+    #           existing ttl or "1" if the existing ttl should be replaced
+    # return 1 if the lock's time was extended, otherwise 0
+    LUA_EXTEND_SCRIPT = """
+        local token = redis.call('get', KEYS[1])
+        if not token or token ~= ARGV[1] then
+            return 0
+        end
+        local expiration = redis.call('pttl', KEYS[1])
+        if not expiration then
+            expiration = 0
+        end
+        if expiration < 0 then
+            return 0
+        end
+
+        local newttl = ARGV[2]
+        if ARGV[3] == "0" then
+            newttl = ARGV[2] + expiration
+        end
+        redis.call('pexpire', KEYS[1], newttl)
+        return 1
+    """
+
+    # KEYS[1] - lock name
+    # ARGV[1] - token
+    # ARGV[2] - milliseconds
+    # return 1 if the lock's time was reacquired, otherwise 0
+    LUA_REACQUIRE_SCRIPT = """
+        local token = redis.call('get', KEYS[1])
+        if not token or token ~= ARGV[1] then
+            return 0
+        end
+        redis.call('pexpire', KEYS[1], ARGV[2])
+        return 1
+    """
+
+    def __init__(self, redis, name, timeout=None, sleep=0.1,
+                 blocking=True, blocking_timeout=None, thread_local=True):
+        """
+        Create a new Lock instance named ``name`` using the Redis client
+        supplied by ``redis``.
+
+        ``timeout`` indicates a maximum life for the lock.
+        By default, it will remain locked until release() is called.
+        ``timeout`` can be specified as a float or integer, both representing
+        the number of seconds the lock should live.
+
+        ``sleep`` indicates the amount of time to sleep per loop iteration
+        when the lock is in blocking mode and another client is currently
+        holding the lock.
+
+        ``blocking`` indicates whether calling ``acquire`` should block until
+        the lock has been acquired or fail immediately, in which case
+        ``acquire`` returns False and the lock is not acquired. Defaults to
+        True. Note this value can be overridden by passing a ``blocking``
+        argument to ``acquire``.
+
+        ``blocking_timeout`` indicates the maximum amount of time in seconds to
+        spend trying to acquire the lock. A value of ``None`` indicates that it
+        should keep trying forever. ``blocking_timeout`` can be specified as a
+        float or integer, both representing the number of seconds to wait.
+
+        ``thread_local`` indicates whether the lock token is placed in
+        thread-local storage. By default, the token is placed in thread local
+        storage so that a thread only sees its token, not a token set by
+        another thread. Consider the following timeline:
+
+            time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
+                     thread-1 sets the token to "abc"
+            time: 1, thread-2 blocks trying to acquire `my-lock` using the
+                     Lock instance.
+            time: 5, thread-1 has not yet completed. redis expires the lock
+                     key.
+            time: 5, thread-2 acquired `my-lock` now that it's available.
+                     thread-2 sets the token to "xyz"
+            time: 6, thread-1 finishes its work and calls release(). if the
+                     token is *not* stored in thread local storage, then
+                     thread-1 would see the token value as "xyz" and would be
+                     able to successfully release thread-2's lock.
+
+        In some use cases it's necessary to disable thread local storage.
+        For example, you may have code where one thread acquires a lock and
+        passes that lock instance to a worker thread to release later. If
+        thread local storage isn't disabled in this case, the worker thread
+        won't see the token set by the thread that acquired the lock. Our
+        assumption is that these cases aren't common and, as such, we default
+        to using thread local storage.
+        """
+        self.redis = redis
+        self.name = name
+        self.timeout = timeout
+        self.sleep = sleep
+        self.blocking = blocking
+        self.blocking_timeout = blocking_timeout
+        self.thread_local = bool(thread_local)
+        self.local = threading.local() if self.thread_local else dummy()
+        self.local.token = None
+        self.register_scripts()
+
+    def register_scripts(self):
+        cls = self.__class__
+        client = self.redis
+        if cls.lua_release is None:
+            cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT)
+        if cls.lua_extend is None:
+            cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)
+        if cls.lua_reacquire is None:
+            cls.lua_reacquire = \
+                client.register_script(cls.LUA_REACQUIRE_SCRIPT)
+
+    def __enter__(self):
+        # force blocking, as otherwise the user would have to check whether
+        # the lock was actually acquired or not.
+        if self.acquire(blocking=True):
+            return self
+        raise LockError("Unable to acquire lock within the time specified")
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.release()
+
+    def acquire(self, blocking=None, blocking_timeout=None, token=None):
+        """
+        Use Redis to hold a shared, distributed lock named ``name``.
+        Returns True once the lock is acquired.
+
+        If ``blocking`` is False, always return immediately. If the lock
+        was acquired, return True, otherwise return False.
+
+        ``blocking_timeout`` specifies the maximum number of seconds to
+        wait trying to acquire the lock.
+
+        ``token`` specifies the token value to be used. If provided, token
+        must be a bytes object or a string that can be encoded to a bytes
+        object with the default encoding. If a token isn't specified, a UUID
+        will be generated.
+        """
+        sleep = self.sleep
+        if token is None:
+            token = uuid.uuid1().hex.encode()
+        else:
+            encoder = self.redis.connection_pool.get_encoder()
+            token = encoder.encode(token)
+        if blocking is None:
+            blocking = self.blocking
+        if blocking_timeout is None:
+            blocking_timeout = self.blocking_timeout
+        stop_trying_at = None
+        if blocking_timeout is not None:
+            stop_trying_at = mod_time.time() + blocking_timeout
+        while True:
+            if self.do_acquire(token):
+                self.local.token = token
+                return True
+            if not blocking:
+                return False
+            next_try_at = mod_time.time() + sleep
+            if stop_trying_at is not None and next_try_at > stop_trying_at:
+                return False
+            mod_time.sleep(sleep)
+
+    def do_acquire(self, token):
+        if self.timeout:
+            # convert to milliseconds
+            timeout = int(self.timeout * 1000)
+        else:
+            timeout = None
+        if self.redis.set(self.name, token, nx=True, px=timeout):
+            return True
+        return False
+
+    def locked(self):
+        """
+        Returns True if this key is locked by any process, otherwise False.
+        """
+        return self.redis.get(self.name) is not None
+
+    def owned(self):
+        """
+        Returns True if this key is locked by this lock, otherwise False.
+        """
+        stored_token = self.redis.get(self.name)
+        # need to always compare bytes to bytes
+        # TODO: this can be simplified when the context manager is finished
+        if stored_token and not isinstance(stored_token, bytes):
+            encoder = self.redis.connection_pool.get_encoder()
+            stored_token = encoder.encode(stored_token)
+        return self.local.token is not None and \
+            stored_token == self.local.token
+
+    def release(self):
+        "Releases the already acquired lock"
+        expected_token = self.local.token
+        if expected_token is None:
+            raise LockError("Cannot release an unlocked lock")
+        self.local.token = None
+        self.do_release(expected_token)
+
+    def do_release(self, expected_token):
+        if not bool(self.lua_release(keys=[self.name],
+                                     args=[expected_token],
+                                     client=self.redis)):
+            raise LockNotOwnedError("Cannot release a lock"
+                                    " that's no longer owned")
+
+    def extend(self, additional_time, replace_ttl=False):
+        """
+        Adds more time to an already acquired lock.
+
+        ``additional_time`` can be specified as an integer or a float, both
+        representing the number of seconds to add.
+
+        ``replace_ttl``: if False (the default), ``additional_time`` is added
+        to the lock's existing ttl. If True, the lock's ttl is replaced with
+        ``additional_time``.
+        """
+        if self.local.token is None:
+            raise LockError("Cannot extend an unlocked lock")
+        if self.timeout is None:
+            raise LockError("Cannot extend a lock with no timeout")
+        return self.do_extend(additional_time, replace_ttl)
+
+    def do_extend(self, additional_time, replace_ttl):
+        additional_time = int(additional_time * 1000)
+        if not bool(
+            self.lua_extend(
+                keys=[self.name],
+                args=[
+                    self.local.token,
+                    additional_time,
+                    replace_ttl and "1" or "0"
+                ],
+                client=self.redis,
+            )
+        ):
+            raise LockNotOwnedError(
+                "Cannot extend a lock that's no longer owned"
+            )
+        return True
+
+    def reacquire(self):
+        """
+        Resets the TTL of an already acquired lock back to the timeout value.
+        """
+        if self.local.token is None:
+            raise LockError("Cannot reacquire an unlocked lock")
+        if self.timeout is None:
+            raise LockError("Cannot reacquire a lock with no timeout")
+        return self.do_reacquire()
+
+    def do_reacquire(self):
+        timeout = int(self.timeout * 1000)
+        if not bool(self.lua_reacquire(keys=[self.name],
+                                       args=[self.local.token, timeout],
+                                       client=self.redis)):
+            raise LockNotOwnedError("Cannot reacquire a lock that's"
+                                    " no longer owned")
+        return True
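# A minimal usage sketch (illustrative, not part of the vendored files above):
# the Lock defined here is normally obtained via Redis.lock(), which forwards
# its arguments to Lock.__init__. Key names and timeouts are placeholders.
import redis
from redis.exceptions import LockError

client = redis.Redis(host='localhost', port=6379)

try:
    # Context-manager form: __enter__ blocks until the lock is held (or
    # blocking_timeout elapses) and __exit__ releases it. timeout bounds
    # the lock's TTL in Redis.
    with client.lock('resource-lock', timeout=10, blocking_timeout=5) as lock:
        lock.extend(10)    # add 10 more seconds to the remaining TTL
        lock.reacquire()   # or reset the TTL back to the full 10 seconds
except LockError:
    # Raised by __enter__ when the lock can't be acquired in time.
    pass

# When one thread acquires the lock and a different thread must release it,
# disable thread-local token storage, as discussed in the __init__ docstring.
handoff_lock = client.lock('handoff-lock', timeout=30, thread_local=False)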
diff --git a/infrastructure/lambda/task_queue_manager/redis/sentinel.py b/infrastructure/lambda/task_queue_manager/redis/sentinel.py
new file mode 100644
index 0000000..ac5bf44
--- /dev/null
+++ b/infrastructure/lambda/task_queue_manager/redis/sentinel.py
@@ -0,0 +1,286 @@
+import random
+import weakref
+
+from redis.client import Redis
+from redis.connection import ConnectionPool, Connection
+from redis.exceptions import (ConnectionError, ResponseError, ReadOnlyError,
+                              TimeoutError)
+from redis._compat import iteritems, nativestr, xrange
+
+
+class MasterNotFoundError(ConnectionError):
+    pass
+
+
+class SlaveNotFoundError(ConnectionError):
+    pass
+
+
+class SentinelManagedConnection(Connection):
+    def __init__(self, **kwargs):
+        self.connection_pool = kwargs.pop('connection_pool')
+        super(SentinelManagedConnection, self).__init__(**kwargs)
+
+    def __repr__(self):
+        pool = self.connection_pool
+        s = '%s<service=%s%%s>' % (type(self).__name__, pool.service_name)
+        if self.host:
+            host_info = ',host=%s,port=%s' % (self.host, self.port)
+            s = s % host_info
+        return s
+
+    def connect_to(self, address):
+        self.host, self.port = address
+        super(SentinelManagedConnection, self).connect()
+        if self.connection_pool.check_connection:
+            self.send_command('PING')
+            if nativestr(self.read_response()) != 'PONG':
+                raise ConnectionError('PING failed')
+
+    def connect(self):
+        if self._sock:
+            return  # already connected
+        if self.connection_pool.is_master:
+            self.connect_to(self.connection_pool.get_master_address())
+        else:
+            for slave in self.connection_pool.rotate_slaves():
+                try:
+                    return self.connect_to(slave)
+                except ConnectionError:
+                    continue
+            raise SlaveNotFoundError  # should never be reached
+
+    def read_response(self):
+        try:
+            return super(SentinelManagedConnection, self).read_response()
+        except ReadOnlyError:
+            if self.connection_pool.is_master:
+                # When talking to a master, a ReadOnlyError likely
+                # indicates that the previous master that we're still connected
+                # to has been demoted to a slave and there's a new master.
+                # calling disconnect will force the connection to re-query
+                # sentinel during the next connect() attempt.
+                self.disconnect()
+                raise ConnectionError('The previous master is now a slave')
+            raise
+
+
+class SentinelConnectionPool(ConnectionPool):
+    """
+    Sentinel backed connection pool.
+
+    If the ``check_connection`` flag is set to True, SentinelManagedConnection
+    sends a PING command right after establishing the connection.
+    """
+
+    def __init__(self, service_name, sentinel_manager, **kwargs):
+        kwargs['connection_class'] = kwargs.get(
+            'connection_class', SentinelManagedConnection)
+        self.is_master = kwargs.pop('is_master', True)
+        self.check_connection = kwargs.pop('check_connection', False)
+        super(SentinelConnectionPool, self).__init__(**kwargs)
+        self.connection_kwargs['connection_pool'] = weakref.proxy(self)
+        self.service_name = service_name
+        self.sentinel_manager = sentinel_manager
+
+    def __repr__(self):
+        return "%s<service=%s(%s)>" % (
+            type(self).__name__,
+            self.service_name,
+            self.is_master and 'master' or 'slave',
+        )
+
+    def reset(self):
+        super(SentinelConnectionPool, self).reset()
+        self.master_address = None
+        self.slave_rr_counter = None
+
+    def owns_connection(self, connection):
+        check = not self.is_master or \
+            (self.is_master and
+             self.master_address == (connection.host, connection.port))
+        parent = super(SentinelConnectionPool, self)
+        return check and parent.owns_connection(connection)
+
+    def get_master_address(self):
+        master_address = self.sentinel_manager.discover_master(
+            self.service_name)
+        if self.is_master:
+            if self.master_address != master_address:
+                self.master_address = master_address
+                # disconnect any idle connections so that they reconnect
+                # to the new master the next time that they are used.
+                self.disconnect(inuse_connections=False)
+        return master_address
+
+    def rotate_slaves(self):
+        "Round-robin slave balancer"
+        slaves = self.sentinel_manager.discover_slaves(self.service_name)
+        if slaves:
+            if self.slave_rr_counter is None:
+                self.slave_rr_counter = random.randint(0, len(slaves) - 1)
+            for _ in xrange(len(slaves)):
+                self.slave_rr_counter = (
+                    self.slave_rr_counter + 1) % len(slaves)
+                slave = slaves[self.slave_rr_counter]
+                yield slave
+        # Fallback to the master connection
+        try:
+            yield self.get_master_address()
+        except MasterNotFoundError:
+            pass
+        raise SlaveNotFoundError('No slave found for %r' % (self.service_name))
+
+
+class Sentinel(object):
+    """
+    Redis Sentinel cluster client
+
+    >>> from redis.sentinel import Sentinel
+    >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
+    >>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
+    >>> master.set('foo', 'bar')
+    >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
+    >>> slave.get('foo')
+    b'bar'
+
+    ``sentinels`` is a list of sentinel nodes. Each node is represented by
+    a pair (hostname, port).
+
+    ``min_other_sentinels`` defines a minimum number of peers for a sentinel.
+    When querying a sentinel, if it doesn't meet this threshold, responses
+    from that sentinel won't be considered valid.
+
+    ``sentinel_kwargs`` is a dictionary of connection arguments used when
+    connecting to sentinel instances. Any argument that can be passed to
+    a normal Redis connection can be specified here. If ``sentinel_kwargs`` is
+    not specified, any socket_timeout and socket_keepalive options specified
+    in ``connection_kwargs`` will be used.
+
+    ``connection_kwargs`` are keyword arguments that will be used when
+    establishing a connection to a Redis server.
+    """
+
+    def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
+                 **connection_kwargs):
+        # if sentinel_kwargs isn't defined, use the socket_* options from
+        # connection_kwargs
+        if sentinel_kwargs is None:
+            sentinel_kwargs = {
+                k: v
+                for k, v in iteritems(connection_kwargs)
+                if k.startswith('socket_')
+            }
+        self.sentinel_kwargs = sentinel_kwargs
+
+        self.sentinels = [Redis(hostname, port, **self.sentinel_kwargs)
+                          for hostname, port in sentinels]
+        self.min_other_sentinels = min_other_sentinels
+        self.connection_kwargs = connection_kwargs
+
+    def __repr__(self):
+        sentinel_addresses = []
+        for sentinel in self.sentinels:
+            sentinel_addresses.append('%s:%s' % (
+                sentinel.connection_pool.connection_kwargs['host'],
+                sentinel.connection_pool.connection_kwargs['port'],
+            ))
+        return '%s<sentinels=[%s]>' % (
+            type(self).__name__,
+            ','.join(sentinel_addresses))
+
+    def check_master_state(self, state, service_name):
+        if not state['is_master'] or state['is_sdown'] or state['is_odown']:
+            return False
+        # Check if our sentinel doesn't see other nodes
+        if state['num-other-sentinels'] < self.min_other_sentinels:
+            return False
+        return True
+
+    def discover_master(self, service_name):
+        """
+        Asks sentinel servers for the Redis master's address corresponding
+        to the service labeled ``service_name``.
+
+        Returns a pair (address, port) or raises MasterNotFoundError if no
+        master is found.
+        """
+        for sentinel_no, sentinel in enumerate(self.sentinels):
+            try:
+                masters = sentinel.sentinel_masters()
+            except (ConnectionError, TimeoutError):
+                continue
+            state = masters.get(service_name)
+            if state and self.check_master_state(state, service_name):
+                # Put this sentinel at the top of the list
+                self.sentinels[0], self.sentinels[sentinel_no] = (
+                    sentinel, self.sentinels[0])
+                return state['ip'], state['port']
+        raise MasterNotFoundError("No master found for %r" % (service_name,))
+
+    def filter_slaves(self, slaves):
+        "Remove slaves that are in an ODOWN or SDOWN state"
+        slaves_alive = []
+        for slave in slaves:
+            if slave['is_odown'] or slave['is_sdown']:
+                continue
+            slaves_alive.append((slave['ip'], slave['port']))
+        return slaves_alive
+
+    def discover_slaves(self, service_name):
+        "Returns a list of alive slaves for service ``service_name``"
+        for sentinel in self.sentinels:
+            try:
+                slaves = sentinel.sentinel_slaves(service_name)
+            except (ConnectionError, ResponseError, TimeoutError):
+                continue
+            slaves = self.filter_slaves(slaves)
+            if slaves:
+                return slaves
+        return []
+
+    def master_for(self, service_name, redis_class=Redis,
+                   connection_pool_class=SentinelConnectionPool, **kwargs):
+        """
+        Returns a redis client instance for the ``service_name`` master.
+
+        A SentinelConnectionPool class is used to retrieve the master's
+        address before establishing a new connection.
+
+        NOTE: If the master's address has changed, any cached connections to
+        the old master are closed.
+
+        By default clients will be a redis.Redis instance. Specify a
+        different class to the ``redis_class`` argument if you desire
+        something different.
+
+        The ``connection_pool_class`` specifies the connection pool to use.
+        The SentinelConnectionPool will be used by default.
+
+        All other keyword arguments are merged with any connection_kwargs
+        passed to this class and passed to the connection pool as keyword
+        arguments to be used to initialize Redis connections.
+        """
+        kwargs['is_master'] = True
+        connection_kwargs = dict(self.connection_kwargs)
+        connection_kwargs.update(kwargs)
+        return redis_class(connection_pool=connection_pool_class(
+            service_name, self, **connection_kwargs))
+
+    def slave_for(self, service_name, redis_class=Redis,
+                  connection_pool_class=SentinelConnectionPool, **kwargs):
+        """
+        Returns a redis client instance for the ``service_name`` slave(s).
+
+        A SentinelConnectionPool class is used to retrieve the slave's
+        address before establishing a new connection.
+
+        By default clients will be a redis.Redis instance. Specify a
+        different class to the ``redis_class`` argument if you desire
+        something different.
+
+        The ``connection_pool_class`` specifies the connection pool to use.
+        The SentinelConnectionPool will be used by default.
+
+        All other keyword arguments are merged with any connection_kwargs
+        passed to this class and passed to the connection pool as keyword
+        arguments to be used to initialize Redis connections.
+        """
+        kwargs['is_master'] = False
+        connection_kwargs = dict(self.connection_kwargs)
+        connection_kwargs.update(kwargs)
+        return redis_class(connection_pool=connection_pool_class(
+            service_name, self, **connection_kwargs))
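# A minimal usage sketch (illustrative, not part of the vendored files above):
# the sentinel addresses and the 'mymaster' service name are placeholders for
# a real deployment.
from redis.sentinel import Sentinel, MasterNotFoundError

sentinel = Sentinel([('sentinel-1', 26379), ('sentinel-2', 26379)],
                    socket_timeout=0.5)

try:
    # master_for()/slave_for() build clients on SentinelConnectionPool, so
    # the master's address is re-discovered on reconnect and failovers are
    # followed automatically.
    master = sentinel.master_for('mymaster', socket_timeout=0.5)
    master.set('foo', 'bar')
    replica = sentinel.slave_for('mymaster', socket_timeout=0.5)
    replica.get('foo')
except MasterNotFoundError:
    # No queried sentinel could name a healthy master for this service.
    pass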
diff --git a/infrastructure/lambda/task_queue_manager/redis/utils.py b/infrastructure/lambda/task_queue_manager/redis/utils.py
new file mode 100644
index 0000000..6ef6fd4
--- /dev/null
+++ b/infrastructure/lambda/task_queue_manager/redis/utils.py
@@ -0,0 +1,33 @@
+from contextlib import contextmanager
+
+
+try:
+    import hiredis  # noqa
+    HIREDIS_AVAILABLE = True
+except ImportError:
+    HIREDIS_AVAILABLE = False
+
+
+def from_url(url, db=None, **kwargs):
+    """
+    Returns an active Redis client generated from the given database URL.
+
+    Will attempt to extract the database id from the url's path fragment, if
+    none is provided.
+    """
+    from redis.client import Redis
+    return Redis.from_url(url, db, **kwargs)
+
+
+@contextmanager
+def pipeline(redis_obj):
+    p = redis_obj.pipeline()
+    yield p
+    p.execute()
+
+
+class dummy(object):
+    """
+    Instances of this class can be used as an attribute container.
+    """
+    pass
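# A minimal usage sketch (illustrative, not part of the vendored files above):
# utils.pipeline() wraps Redis.pipeline() in a context manager that calls
# execute() on exit, sending the buffered commands in one round trip.
# Host/port are placeholders.
import redis
from redis.utils import pipeline

client = redis.Redis(host='localhost', port=6379)

with pipeline(client) as p:
    p.set('counter', 1)
    p.incr('counter')
    # both commands are executed together when the with-block exits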