Compare commits
5 Commits
9ab9bcfc96...latest
| Author | SHA1 | Date |
|---|---|---|
| | 064c20f0b1 | |
| | 3eaa68c559 | |
| | e8a0ab0f41 | |
| | 4dd1e4b1a6 | |
| | 45af5e247c | |
README.md (116)

@@ -1,9 +1,117 @@

# aiohttpx

aiohttpx is an HTTP client built on top of the [httpx](https://github.com/encode/httpx) and [aiohttp](https://github.com/aio-libs/aiohttp) libraries.

## Description

**aiohttpx** is an asynchronous HTTP client that merges the ergonomics and powerful API of [httpx](https://github.com/encode/httpx) with the high-performance transport layer of [aiohttp](https://github.com/aio-libs/aiohttp).

It also provides optional Redis-powered caching and rate limiting to enable efficient, production-grade request handling with minimal setup.

## Features

* Fully asynchronous using aiohttp as the transport
* Supports caching using Redis as the backend
* Supports rate limiting using Redis as the backend
* Fully asynchronous HTTP client using **aiohttp** as the transport.
* Optional **Redis-based caching** to reduce redundant API calls.
* Optional **Redis-based rate limiting** to control request throughput.
* Familiar API interface inspired by **httpx**.

## Requirements

* Python 3.13 or higher
* Redis server (if using caching or rate limiting)

## Installation

### Using the `uv` Tool

This project supports dependency management via the [uv tool](https://github.com/astral-sh/uv).
To set up the project:

1. **Install uv**

   ```bash
   curl -LsSf https://astral.sh/uv/install.sh | sh
   ```

2. **Add aiohttpx to your project**

   ```bash
   uv add git+https://git.meowly.ru/Miwory/aiohttpx.git
   ```

## Configuration

aiohttpx supports several optional parameters for caching and rate limiting:

### `key` — Redis prefix

A string used as the **Redis key namespace** for all cache and rate-limit entries.
This allows multiple clients or services to share the same Redis instance without collisions.

### `limit` — Rate limit

The maximum number of requests allowed **per second** for this client.
This value is enforced using Redis, making it safe to use across distributed systems.
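
For example, a client that shares a Redis instance with other services could be given its own namespace and request budget like this (a minimal sketch with placeholder URLs; the keyword arguments mirror the ones used in the Usage example below):

```python
from aiohttpx.client import AioHTTPXClient

# Minimal sketch: `key` namespaces this client's Redis entries and `limit`
# caps its request rate; both are enforced through Redis.
# base_url and redis_url are placeholder values.
client = AioHTTPXClient(
    base_url='https://api.example.com',
    redis_url='redis://localhost:6379/0',
    key='example-service',  # Redis prefix
    limit=10,               # rate limit
)
```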

### `X-Cache-TTL` — Enable caching for a request

To enable caching for a specific request, include the header:

```text
X-Cache-TTL: <seconds>
```

Example:

```python
response = await client.get(
    "/users",
    headers={"X-Cache-TTL": "60"},  # cache this endpoint for 60 seconds
)
```

If this header is present and Redis is configured, the response will be cached for the specified duration.
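
A repeated, identical request made within that window should then be served from Redis rather than hitting the upstream API again (a sketch of the expected behavior, assuming the client was constructed with a `redis_url`):

```python
# First call goes to the upstream API and stores the response in Redis for 60 seconds.
fresh = await client.get("/users", headers={"X-Cache-TTL": "60"})

# An identical call within those 60 seconds is answered from the Redis cache.
cached = await client.get("/users", headers={"X-Cache-TTL": "60"})
```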

## Usage

### Basic Example

```python
from aiohttpx.client import AioHTTPXClient


class TwitchAPIClient(AioHTTPXClient):
    def __init__(
        self,
        redis_url: str,
        client_id: str,
        client_secret: str,
        redirect_uri: str,
    ):
        self.base_uri = 'https://api.twitch.tv/helix'
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri

        super().__init__(
            base_url=self.base_uri,
            headers={'Client-Id': self.client_id},
            redis_url=redis_url,
            key='twitch',  # Redis prefix
            limit=10,  # 10 requests per second
            logger='Twitch API',
        )

    async def test_endpoint(self):
        ...
```
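
Instantiating and using such a client then follows the familiar httpx pattern. A hypothetical sketch (the credentials, Redis URL, and `/streams` endpoint below are placeholders, not part of the project's API):

```python
import asyncio


async def main() -> None:
    # Placeholder credentials and Redis URL for illustration only.
    client = TwitchAPIClient(
        redis_url='redis://localhost:6379/0',
        client_id='<client-id>',
        client_secret='<client-secret>',
        redirect_uri='https://localhost/callback',
    )

    # Rate limiting is applied transparently; the optional header enables caching.
    response = await client.get('/streams', headers={'X-Cache-TTL': '30'})
    print(response.status_code)

    await client.aclose()


asyncio.run(main())
```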

## Linting and Pre-commit Checks

This project uses `pre-commit` and `ruff` for linting and formatting.
Run the linting process with:

```bash
poe lint
```

## License

This project is licensed under the MIT License.
See `LICENSE` for details.

```diff
@@ -1,6 +1,6 @@
 [project]
 name = "aiohttpx"
-version = "1.0.0"
+version = "1.2.2"
 description = "Custom HTTPX client with aiohttp transport, rate limiter and caching"
 readme = "README.md"
 authors = [
```

```diff
@@ -1 +1 @@
-__version__: str = '1.0.0'
+__version__: str = '1.1.0'
```

```diff
@@ -1,7 +1,7 @@
 from collections.abc import Callable, Mapping
 from logging import getLogger
 from ssl import SSLContext
-from typing import Any
+from typing import Any, TypeVar
 
 from httpx import URL, Limits
 from httpx import AsyncClient as AsyncHTTPXClient
@@ -10,6 +10,8 @@ from httpx import _types as t  # type: ignore
 
 from aiohttpx.transports.cache import AsyncCacheTransport
 
+K = TypeVar('K')
+
 
 class AioHTTPXClient(AsyncHTTPXClient):
     def __init__(
@@ -59,5 +61,8 @@ class AioHTTPXClient(AsyncHTTPXClient):
 
         self.logger = getLogger(logger)
 
+    def clean_dict[K, V](self, params: dict[K, Any | None]):
+        return {k: v for k, v in params.items() if v is not None}
+
 
 __all__ = ['AioHTTPXClient']
```

```diff
@@ -2,9 +2,10 @@ from httpx import Request
 from httpx import Response as HTTPXResponse
 from httpx import _models as m  # type: ignore
 from orjson import dumps, loads
+from redis.asyncio import Redis
 
 from aiohttpx.responses import Response
-from aiohttpx.transports.rate_limiter import AsyncRateLimit, Redis
+from aiohttpx.transports.rate_limiter import AsyncRateLimit
 
 
 def generate_cache_key(request: Request) -> str:
@@ -15,8 +16,8 @@ def generate_cache_key(request: Request) -> str:
     return f'cache:{hash(str(dumps(request_data)))}'
 
 
-def cache_response(
-    client: Redis[bytes],
+async def cache_response(
+    client: 'Redis[bytes]',
     cache_key: str,
     request: Request,
     response: Response | HTTPXResponse,
@@ -25,7 +26,7 @@ def cache_response(
     ttl = get_ttl_from_headers(request.headers)
 
     if ttl:
-        client.set(cache_key, serialized_response, ex=ttl)
+        await client.set(cache_key, serialized_response, ex=ttl)
 
 
 def get_ttl_from_headers(headers: m.Headers) -> int | None:
@@ -36,10 +37,10 @@ def get_ttl_from_headers(headers: m.Headers) -> int | None:
     return None
 
 
-def get_cached_response(
-    client: Redis[bytes], cache_key: str
+async def get_cached_response(
+    client: 'Redis[bytes]', cache_key: str
 ) -> Response | None:
-    cached_data = client.get(cache_key)
+    cached_data = await client.get(cache_key)
 
     if cached_data:
         return deserialize_response(cached_data)
@@ -83,13 +84,13 @@ class AsyncCacheTransport(AsyncRateLimit):
            return await self.transport.handle_async_request(request)
 
        cache_key = generate_cache_key(request)
-       cached_response = get_cached_response(self.client, cache_key)
+       cached_response = await get_cached_response(self.client, cache_key)
 
        if cached_response:
            return cached_response
 
        response = await self.transport.handle_async_request(request)
 
-       cache_response(self.client, cache_key, request, response)
+       await cache_response(self.client, cache_key, request, response)
 
        return response
```

```diff
@@ -1,7 +1,7 @@
 from asyncio import sleep as async_sleep
 
 from httpx import Request
-from redis import Redis
+from redis.asyncio import Redis
 
 from aiohttpx.responses import Response
 from aiohttpx.transports.aio import AiohttpTransport
@@ -9,7 +9,7 @@ from aiohttpx.transports.aio import AiohttpTransport
 
 class AsyncRateLimit(AiohttpTransport):
     def __init__(
-        self, redis: Redis[bytes] | None, key: str | None, limit: int | None
+        self, redis: 'Redis[bytes] | None', key: str | None, limit: int | None
     ) -> None:
         self.transport = AiohttpTransport()
         self.client = redis
@@ -27,17 +27,18 @@ class AsyncRateLimit(AiohttpTransport):
 
     async def request_is_limited(self) -> bool:
         if self.client and self.key and self.limit:
-            t: int = int(self.client.time()[0])  # type: ignore
+            time = await self.client.time()  # type: ignore
+            t: int = int(time[0])  # type: ignore
             separation = round(60 / self.limit)
 
-            value = self.client.get(self.key) or t
-            self.client.setnx(self.key, value)
+            value = await self.client.get(self.key) or t
+            await self.client.setnx(self.key, value)
 
             tat = max(int(value), t)
 
             if tat - t <= 60 - separation:
                 new_tat = max(tat, t) + separation
-                self.client.set(self.key, new_tat)
+                await self.client.set(self.key, new_tat)
                 return False
 
             return True
```