Compare commits
7 Commits
40c017b842
...
latest
| Author | SHA1 | Date | |
|---|---|---|---|
| 064c20f0b1 | |||
| 3eaa68c559 | |||
| e8a0ab0f41 | |||
| 4dd1e4b1a6 | |||
| 45af5e247c | |||
| 9ab9bcfc96 | |||
| 33f25c799c |
117
README.md
117
README.md
@@ -0,0 +1,117 @@
|
||||
# aiohttpx
|
||||
|
||||
## Description
|
||||
|
||||
**aiohttpx** is an asynchronous HTTP client that merges the ergonomics and powerful API of [httpx](https://github.com/encode/httpx) with the high-performance transport layer of [aiohttp](https://github.com/aio-libs/aiohttp).
|
||||
It also provides optional Redis-powered caching and rate-limiting to enable efficient, production-grade request handling with minimal setup.
|
||||
|
||||
## Features
|
||||
|
||||
* Fully asynchronous HTTP client using **aiohttp** as the transport.
|
||||
* Optional **Redis-based caching** to reduce redundant API calls.
|
||||
* Optional **Redis-based rate limiting** to control request throughput.
|
||||
* Familiar API interface inspired by **httpx**.
|
||||
|
||||
## Requirements
|
||||
|
||||
* Python 3.13 or higher
|
||||
* Redis server (if using caching or rate limiting)
|
||||
|
||||
## Installation
|
||||
|
||||
### Using `uv` Tool
|
||||
|
||||
This project supports dependency management via the [uv tool](https://github.com/astral-sh/uv).
|
||||
To set up the project:
|
||||
|
||||
1. **Install uv**
|
||||
|
||||
```bash
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
```
|
||||
|
||||
2. **Add the package to your project**
|
||||
|
||||
```bash
|
||||
uv add git+https://git.meowly.ru/Miwory/aiohttpx.git
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
aiohttpx supports several optional parameters for caching and rate limiting:
|
||||
|
||||
### `key` — Redis prefix
|
||||
|
||||
A string used as the **Redis key namespace** for all cache and rate-limit entries.
|
||||
This allows multiple clients or services to share the same Redis instance without collisions.
|
||||
|
||||
### `limit` — Rate limit
|
||||
|
||||
The maximum number of requests allowed **per second** for this client.
|
||||
This value is enforced using Redis, making it safe to use across distributed systems.
|
||||
|
||||
### `X-Cache-TTL` — Enable caching for a request
|
||||
|
||||
To enable caching for a specific request, include the header:
|
||||
|
||||
```text
|
||||
X-Cache-TTL: <seconds>
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
response = await client.get(
|
||||
"/users",
|
||||
headers={"X-Cache-TTL": "60"}, # cache this endpoint for 60 seconds
|
||||
)
|
||||
```
|
||||
|
||||
If this header is present and Redis is configured, the response will be cached for the specified duration.
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Example
|
||||
|
||||
```python
|
||||
from aiohttpx.client import AioHTTPXClient
|
||||
|
||||
class TwitchAPIClient(AioHTTPXClient):
|
||||
def __init__(
|
||||
self,
|
||||
redis_url: str,
|
||||
client_id: str,
|
||||
client_secret: str,
|
||||
redirect_uri: str,
|
||||
):
|
||||
self.base_uri = 'https://api.twitch.tv/helix'
|
||||
self.client_id = client_id
|
||||
self.client_secret = client_secret
|
||||
self.redirect_uri = redirect_uri
|
||||
|
||||
super().__init__(
|
||||
base_url=self.base_uri,
|
||||
headers={'Client-Id': self.client_id},
|
||||
redis_url=redis_url,
|
||||
key='twitch', # Redis prefix
|
||||
limit=10, # 10 requests per second
|
||||
logger='Twitch API',
|
||||
)
|
||||
|
||||
async def test_endpoint(self):
|
||||
...
|
||||
```
|
||||
|
||||
## Linting and Pre-commit Checks
|
||||
|
||||
This project uses `pre-commit` and `ruff` for linting and formatting.
|
||||
Run the linting process with:
|
||||
|
||||
```bash
|
||||
poe lint
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the MIT License.
|
||||
See `LICENSE` for details.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "aiohttpx"
|
||||
version = "0.3.0"
|
||||
version = "1.2.2"
|
||||
description = "Custom HTTPX client with aiohttp transport, rate limiter and caching"
|
||||
readme = "README.md"
|
||||
authors = [
|
||||
|
||||
@@ -1 +1 @@
|
||||
__version__: str = '0.3.0'
|
||||
__version__: str = '1.1.0'
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from collections.abc import Callable, Mapping
|
||||
from logging import getLogger
|
||||
from ssl import SSLContext
|
||||
from typing import Any
|
||||
from typing import Any, TypeVar
|
||||
|
||||
from httpx import URL, Limits
|
||||
from httpx import AsyncClient as AsyncHTTPXClient
|
||||
@ -10,6 +10,8 @@ from httpx import _types as t # type: ignore
|
||||
|
||||
from aiohttpx.transports.cache import AsyncCacheTransport
|
||||
|
||||
K = TypeVar('K')
|
||||
|
||||
|
||||
class AioHTTPXClient(AsyncHTTPXClient):
|
||||
def __init__(
|
||||
@@ -33,6 +35,7 @@ class AioHTTPXClient(AsyncHTTPXClient):
|
||||
redis_url: str | None = None,
|
||||
key: str | None = None,
|
||||
limit: int | None = None,
|
||||
logger: str | None = __name__,
|
||||
) -> None:
|
||||
super().__init__(
|
||||
auth=auth,
|
||||
@@ -56,7 +59,10 @@ class AioHTTPXClient(AsyncHTTPXClient):
|
||||
default_encoding=default_encoding,
|
||||
)
|
||||
|
||||
self.logger = getLogger(__name__)
|
||||
self.logger = getLogger(logger)
|
||||
|
||||
def clean_dict[K, V](self, params: dict[K, Any | None]):
|
||||
return {k: v for k, v in params.items() if v is not None}
|
||||
|
||||
|
||||
__all__ = ['AioHTTPXClient']
|
||||
|
||||
56
src/aiohttpx/status.py
Normal file
56
src/aiohttpx/status.py
Normal file
@@ -0,0 +1,56 @@
|
||||
CONTINUE = 100
|
||||
SWITCHING_PROTOCOLS = 101
|
||||
OK = 200
|
||||
CREATED = 201
|
||||
ACCEPTED = 202
|
||||
NON_AUTHORITATIVE_INFORMATION = 203
|
||||
NO_CONTENT = 204
|
||||
RESET_CONTENT = 205
|
||||
PARTIAL_CONTENT = 206
|
||||
MULTIPLE_CHOICES = 300
|
||||
MOVED_PERMANENTLY = 301
|
||||
FOUND = 302
|
||||
SEE_OTHER = 303
|
||||
NOT_MODIFIED = 304
|
||||
USE_PROXY = 305
|
||||
SWITCH_PROXY = 306
|
||||
TEMPORARY_REDIRECT = 307
|
||||
PERMANENT_REDIRECT = 308
|
||||
BAD_REQUEST = 400
|
||||
UNAUTHORIZED = 401
|
||||
PAYMENT_REQUIRED = 402
|
||||
FORBIDDEN = 403
|
||||
NOT_FOUND = 404
|
||||
METHOD_NOT_ALLOWED = 405
|
||||
NOT_ACCEPTABLE = 406
|
||||
PROXY_AUTHENTICATION_REQUIRED = 407
|
||||
REQUEST_TIMEOUT = 408
|
||||
CONFLICT = 409
|
||||
GONE = 410
|
||||
LENGTH_REQUIRED = 411
|
||||
PRECONDITION_FAILED = 412
|
||||
REQUEST_ENTITY_TOO_LARGE = 413
|
||||
REQUEST_URI_TOO_LONG = 414
|
||||
UNSUPPORTED_MEDIA_TYPE = 415
|
||||
REQUESTED_RANGE_NOT_SATISFIABLE = 416
|
||||
EXPECTATION_FAILED = 417
|
||||
IM_A_TEAPOT = 418
|
||||
MISDIRECTED_REQUEST = 421
|
||||
UNPROCESSABLE_ENTITY = 422
|
||||
LOCKED = 423
|
||||
FAILED_DEPENDENCY = 424
|
||||
UPGRADE_REQUIRED = 426
|
||||
PRECONDITION_REQUIRED = 428
|
||||
TOO_MANY_REQUESTS = 429
|
||||
REQUEST_HEADER_FIELDS_TOO_LARGE = 431
|
||||
INTERNAL_SERVER_ERROR = 500
|
||||
NOT_IMPLEMENTED = 501
|
||||
BAD_GATEWAY = 502
|
||||
SERVICE_UNAVAILABLE = 503
|
||||
GATEWAY_TIMEOUT = 504
|
||||
HTTP_VERSION_NOT_SUPPORTED = 505
|
||||
VARIANT_ALSO_NEGOTIATES = 506
|
||||
INSUFFICIENT_STORAGE = 507
|
||||
LOOP_DETECTED = 508
|
||||
NOT_EXTENDED = 510
|
||||
NETWORK_AUTHENTICATION_REQUIRED = 511
|
||||
@@ -2,9 +2,10 @@ from httpx import Request
|
||||
from httpx import Response as HTTPXResponse
|
||||
from httpx import _models as m # type: ignore
|
||||
from orjson import dumps, loads
|
||||
from redis.asyncio import Redis
|
||||
|
||||
from aiohttpx.responses import Response
|
||||
from aiohttpx.transports.rate_limiter import AsyncRateLimit, Redis
|
||||
from aiohttpx.transports.rate_limiter import AsyncRateLimit
|
||||
|
||||
|
||||
def generate_cache_key(request: Request) -> str:
|
||||
@@ -15,8 +16,8 @@ def generate_cache_key(request: Request) -> str:
|
||||
return f'cache:{hash(str(dumps(request_data)))}'
|
||||
|
||||
|
||||
def cache_response(
|
||||
client: Redis[bytes],
|
||||
async def cache_response(
|
||||
client: 'Redis[bytes]',
|
||||
cache_key: str,
|
||||
request: Request,
|
||||
response: Response | HTTPXResponse,
|
||||
@@ -25,7 +26,7 @@ def cache_response(
|
||||
ttl = get_ttl_from_headers(request.headers)
|
||||
|
||||
if ttl:
|
||||
client.set(cache_key, serialized_response, ex=ttl)
|
||||
await client.set(cache_key, serialized_response, ex=ttl)
|
||||
|
||||
|
||||
def get_ttl_from_headers(headers: m.Headers) -> int | None:
|
||||
@@ -36,10 +37,10 @@ def get_ttl_from_headers(headers: m.Headers) -> int | None:
|
||||
return None
|
||||
|
||||
|
||||
def get_cached_response(
|
||||
client: Redis[bytes], cache_key: str
|
||||
async def get_cached_response(
|
||||
client: 'Redis[bytes]', cache_key: str
|
||||
) -> Response | None:
|
||||
cached_data = client.get(cache_key)
|
||||
cached_data = await client.get(cache_key)
|
||||
|
||||
if cached_data:
|
||||
return deserialize_response(cached_data)
|
||||
@@ -83,13 +84,13 @@ class AsyncCacheTransport(AsyncRateLimit):
|
||||
return await self.transport.handle_async_request(request)
|
||||
|
||||
cache_key = generate_cache_key(request)
|
||||
cached_response = get_cached_response(self.client, cache_key)
|
||||
cached_response = await get_cached_response(self.client, cache_key)
|
||||
|
||||
if cached_response:
|
||||
return cached_response
|
||||
|
||||
response = await self.transport.handle_async_request(request)
|
||||
|
||||
cache_response(self.client, cache_key, request, response)
|
||||
await cache_response(self.client, cache_key, request, response)
|
||||
|
||||
return response
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from asyncio import sleep as async_sleep
|
||||
|
||||
from httpx import Request
|
||||
from redis import Redis
|
||||
from redis.asyncio import Redis
|
||||
|
||||
from aiohttpx.responses import Response
|
||||
from aiohttpx.transports.aio import AiohttpTransport
|
||||
@@ -9,7 +9,7 @@ from aiohttpx.transports.aio import AiohttpTransport
|
||||
|
||||
class AsyncRateLimit(AiohttpTransport):
|
||||
def __init__(
|
||||
self, redis: Redis[bytes] | None, key: str | None, limit: int | None
|
||||
self, redis: 'Redis[bytes] | None', key: str | None, limit: int | None
|
||||
) -> None:
|
||||
self.transport = AiohttpTransport()
|
||||
self.client = redis
|
||||
@@ -27,17 +27,18 @@ class AsyncRateLimit(AiohttpTransport):
|
||||
|
||||
async def request_is_limited(self) -> bool:
|
||||
if self.client and self.key and self.limit:
|
||||
t: int = int(self.client.time()[0]) # type: ignore
|
||||
time = await self.client.time() # type: ignore
|
||||
t: int = int(time[0]) # type: ignore
|
||||
separation = round(60 / self.limit)
|
||||
|
||||
value = self.client.get(self.key) or t
|
||||
self.client.setnx(self.key, value)
|
||||
value = await self.client.get(self.key) or t
|
||||
await self.client.setnx(self.key, value)
|
||||
|
||||
tat = max(int(value), t)
|
||||
|
||||
if tat - t <= 60 - separation:
|
||||
new_tat = max(tat, t) + separation
|
||||
self.client.set(self.key, new_tat)
|
||||
await self.client.set(self.key, new_tat)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
Reference in New Issue
Block a user