Redis клиент сменён с синхронного на асинхронный
This commit is contained in:
@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "aiohttpx"
|
||||
version = "1.1.0"
|
||||
version = "1.2.0"
|
||||
description = "Custom HTTPX client with aiohttp transport, rate limiter and caching"
|
||||
readme = "README.md"
|
||||
authors = [
|
||||
|
||||
@ -2,9 +2,10 @@ from httpx import Request
|
||||
from httpx import Response as HTTPXResponse
|
||||
from httpx import _models as m # type: ignore
|
||||
from orjson import dumps, loads
|
||||
from redis.asyncio import Redis
|
||||
|
||||
from aiohttpx.responses import Response
|
||||
from aiohttpx.transports.rate_limiter import AsyncRateLimit, Redis
|
||||
from aiohttpx.transports.rate_limiter import AsyncRateLimit
|
||||
|
||||
|
||||
def generate_cache_key(request: Request) -> str:
|
||||
@ -15,7 +16,7 @@ def generate_cache_key(request: Request) -> str:
|
||||
return f'cache:{hash(str(dumps(request_data)))}'
|
||||
|
||||
|
||||
def cache_response(
|
||||
async def cache_response(
|
||||
client: Redis[bytes],
|
||||
cache_key: str,
|
||||
request: Request,
|
||||
@ -25,7 +26,7 @@ def cache_response(
|
||||
ttl = get_ttl_from_headers(request.headers)
|
||||
|
||||
if ttl:
|
||||
client.set(cache_key, serialized_response, ex=ttl)
|
||||
await client.set(cache_key, serialized_response, ex=ttl)
|
||||
|
||||
|
||||
def get_ttl_from_headers(headers: m.Headers) -> int | None:
|
||||
@ -36,10 +37,10 @@ def get_ttl_from_headers(headers: m.Headers) -> int | None:
|
||||
return None
|
||||
|
||||
|
||||
def get_cached_response(
|
||||
async def get_cached_response(
|
||||
client: Redis[bytes], cache_key: str
|
||||
) -> Response | None:
|
||||
cached_data = client.get(cache_key)
|
||||
cached_data = await client.get(cache_key)
|
||||
|
||||
if cached_data:
|
||||
return deserialize_response(cached_data)
|
||||
@ -83,13 +84,13 @@ class AsyncCacheTransport(AsyncRateLimit):
|
||||
return await self.transport.handle_async_request(request)
|
||||
|
||||
cache_key = generate_cache_key(request)
|
||||
cached_response = get_cached_response(self.client, cache_key)
|
||||
cached_response = await get_cached_response(self.client, cache_key)
|
||||
|
||||
if cached_response:
|
||||
return cached_response
|
||||
|
||||
response = await self.transport.handle_async_request(request)
|
||||
|
||||
cache_response(self.client, cache_key, request, response)
|
||||
await cache_response(self.client, cache_key, request, response)
|
||||
|
||||
return response
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
from asyncio import sleep as async_sleep
|
||||
|
||||
from httpx import Request
|
||||
from redis import Redis
|
||||
from redis.asyncio import Redis
|
||||
|
||||
from aiohttpx.responses import Response
|
||||
from aiohttpx.transports.aio import AiohttpTransport
|
||||
@ -30,14 +30,14 @@ class AsyncRateLimit(AiohttpTransport):
|
||||
t: int = int(self.client.time()[0]) # type: ignore
|
||||
separation = round(60 / self.limit)
|
||||
|
||||
value = self.client.get(self.key) or t
|
||||
self.client.setnx(self.key, value)
|
||||
value = await self.client.get(self.key) or t
|
||||
await self.client.setnx(self.key, value)
|
||||
|
||||
tat = max(int(value), t)
|
||||
|
||||
if tat - t <= 60 - separation:
|
||||
new_tat = max(tat, t) + separation
|
||||
self.client.set(self.key, new_tat)
|
||||
await self.client.set(self.key, new_tat)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
Reference in New Issue
Block a user