add alru_cache(), async_= to timed_cache()

Yusur 2025-08-15 20:37:33 +02:00
parent c3215c7c8b
commit ab6dbbade6
3 changed files with 241 additions and 14 deletions


@@ -8,7 +8,7 @@
 * Automatically handles commit and rollback
 + `sqlalchemy_async` also offers `async_query()`
 + Changed `sqlalchemy.parent_children()` to use `lazy='selectin'` by default
-+ Add `timed_cache()`, `TimedDict()`, `none_pass()`, `twocolon_list()`, `quote_css_string()`, `must_be()`
++ Add `timed_cache()`, `alru_cache()`, `TimedDict()`, `none_pass()`, `twocolon_list()`, `quote_css_string()`, `must_be()`
 + Add module `calendar` with `want_*` date type conversion utilities and `age_and_days()`
 + Move obsolete stuff to `obsolete` package (includes configparse 0.3 as of now)
 + Add `redact` module with `redact_url_password()`
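
For orientation, a minimal usage sketch of the `timed_cache()` entry above. Hedged: the import path `from suou import timed_cache` is assumed from the package name, and the behavior follows the decorator's docstring further down this diff.

from suou import timed_cache

@timed_cache(ttl=60)                 # results are reused within each 60-second window
def fetch_config(name: str) -> dict:
    return {"name": name}            # stand-in for an expensive lookup

fetch_config("db")                   # computed
fetch_config("db")                   # served from the cache until the window rolls over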


@@ -14,11 +14,16 @@ This software is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 """
+from collections import namedtuple
 import math
+from threading import RLock
 import time
-from typing import Callable, TypeVar
+from types import CoroutineType, NoneType
+from typing import Callable, Iterable, Mapping, TypeVar
 import warnings
-from functools import wraps, lru_cache
+from functools import update_wrapper, wraps, lru_cache
+from suou.itertools import hashed_list

 _T = TypeVar('_T')
 _U = TypeVar('_U')
@@ -70,17 +75,218 @@ def not_implemented(msg: Callable | str | None = None):
     return decorator

+def flat_args(args: Iterable, kwds: Mapping, typed,
+              kwd_mark = (object(),),
+              fasttypes = {int, str, frozenset, NoneType},
+              sorted=sorted, tuple=tuple, type=type, len=len):
+    '''Turn positional and keyword arguments into a hashable key for use in caches.
+    Shamelessly copied from functools._make_key() from the Python Standard Library.
+    Never trust underscores, you know.
+
+    This assumes all argument types are hashable!'''
+    key = args
+    if kwds:
+        sorted_items = sorted(kwds.items())
+        key += kwd_mark
+        for item in sorted_items:
+            key += item
+    if typed:
+        key += tuple(type(v) for v in args)
+        if kwds:
+            key += tuple(type(v) for k, v in sorted_items)
+    elif len(key) == 1 and type(key[0]) in fasttypes:
+        return key[0]
+    return hashed_list(key)
+
+def _make_alru_cache(_CacheInfo):
+    def alru_cache(maxsize: int = 128, typed: bool = False):
+        """
+        Reimplementation of lru_cache() for coroutines: in fact it is lru_cache()
+        from the Python 3.13.7 Standard Library with just three lines modified.
+        Shamelessly adapted from the Python Standard Library.
+        PSA: unlike the Standard Library version, there is no C speed-up. Sorry.
+
+        NEW 0.5.0
+        """
+        # Users should only access the lru_cache through its public API:
+        #   cache_info, cache_clear, and f.__wrapped__
+        # The internals of the lru_cache are encapsulated for thread safety and
+        # to allow the implementation to change (including a possible C version).
+        # suou.alru_cache is based on pure-Python functools.lru_cache() as of Python 3.13.7.
+        if isinstance(maxsize, int):
+            # Negative maxsize is treated as 0
+            if maxsize < 0:
+                maxsize = 0
+        elif callable(maxsize) and isinstance(typed, bool):
+            # The user_function was passed in directly via the maxsize argument
+            user_function, maxsize = maxsize, 128
+            wrapper = _alru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
+            wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
+            return update_wrapper(wrapper, user_function)
+        elif maxsize is not None:
+            raise TypeError(
+                'Expected first argument to be an integer, a callable, or None')
+
+        def decorating_function(user_function: CoroutineType):
+            wrapper = _alru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
+            wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed}
+            return update_wrapper(wrapper, user_function)
+
+        return decorating_function
+
+    def _alru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
+        # Constants shared by all lru cache instances:
+        sentinel = object()                    # unique object used to signal cache misses
+        make_key = flat_args                   # build a key from the function arguments
+        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields
+
+        cache = {}
+        hits = misses = 0
+        full = False
+        cache_get = cache.get      # bound method to lookup a key or return None
+        cache_len = cache.__len__  # get cache size without calling len()
+        lock = RLock()             # because linkedlist updates aren't threadsafe
+        root = []                  # root of the circular doubly linked list
+        root[:] = [root, root, None, None]     # initialize by pointing to self
+
+        if maxsize == 0:
+
+            async def wrapper(*args, **kwds):
+                # No caching -- just a statistics update
+                nonlocal misses
+                misses += 1
+                result = await user_function(*args, **kwds)
+                return result
+
+        elif maxsize is None:
+
+            async def wrapper(*args, **kwds):
+                # Simple caching without ordering or size limit
+                nonlocal hits, misses
+                key = make_key(args, kwds, typed)
+                result = cache_get(key, sentinel)
+                if result is not sentinel:
+                    hits += 1
+                    return result
+                misses += 1
+                result = await user_function(*args, **kwds)
+                cache[key] = result
+                return result
+
+        else:
+
+            async def wrapper(*args, **kwds):
+                # Size limited caching that tracks accesses by recency
+                nonlocal root, hits, misses, full
+                key = make_key(args, kwds, typed)
+                with lock:
+                    link = cache_get(key)
+                    if link is not None:
+                        # Move the link to the front of the circular queue
+                        link_prev, link_next, _key, result = link
+                        link_prev[NEXT] = link_next
+                        link_next[PREV] = link_prev
+                        last = root[PREV]
+                        last[NEXT] = root[PREV] = link
+                        link[PREV] = last
+                        link[NEXT] = root
+                        hits += 1
+                        return result
+                    misses += 1
+                result = await user_function(*args, **kwds)
+                with lock:
+                    if key in cache:
+                        # Getting here means that this same key was added to the
+                        # cache while the lock was released.  Since the link
+                        # update is already done, we need only return the
+                        # computed result and update the count of misses.
+                        pass
+                    elif full:
+                        # Use the old root to store the new key and result.
+                        oldroot = root
+                        oldroot[KEY] = key
+                        oldroot[RESULT] = result
+                        # Empty the oldest link and make it the new root.
+                        # Keep a reference to the old key and old result to
+                        # prevent their ref counts from going to zero during the
+                        # update.  That will prevent potentially arbitrary object
+                        # clean-up code (i.e. __del__) from running while we're
+                        # still adjusting the links.
+                        root = oldroot[NEXT]
+                        oldkey = root[KEY]
+                        oldresult = root[RESULT]
+                        root[KEY] = root[RESULT] = None
+                        # Now update the cache dictionary.
+                        del cache[oldkey]
+                        # Save the potentially reentrant cache[key] assignment
+                        # for last, after the root and links have been put in
+                        # a consistent state.
+                        cache[key] = oldroot
+                    else:
+                        # Put result in a new link at the front of the queue.
+                        last = root[PREV]
+                        link = [last, root, key, result]
+                        last[NEXT] = root[PREV] = cache[key] = link
+                        # Use the cache_len bound method instead of the len() function
+                        # which could potentially be wrapped in an lru_cache itself.
+                        full = (cache_len() >= maxsize)
+                return result
+
+        def cache_info():
+            """Report cache statistics"""
+            with lock:
+                return _CacheInfo(hits, misses, maxsize, cache_len())
+
+        def cache_clear():
+            """Clear the cache and cache statistics"""
+            nonlocal hits, misses, full
+            with lock:
+                cache.clear()
+                root[:] = [root, root, None, None]
+                hits = misses = 0
+                full = False
+
+        wrapper.cache_info = cache_info
+        wrapper.cache_clear = cache_clear
+        return wrapper
+
+    return alru_cache
+
+alru_cache = _make_alru_cache(namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]))
+del _make_alru_cache
+
-def timed_cache(ttl: int, maxsize: int = 128, typed: bool = False) -> Callable[[Callable], Callable]:
+def timed_cache(ttl: int, maxsize: int = 128, typed: bool = False, *, async_: bool = False) -> Callable[[Callable], Callable]:
     """
     LRU cache which expires after the TTL in seconds passed as argument.
+    Supports coroutines with async_=True.

     NEW 0.5.0
     """
     def decorator(func):
         start_time = None
-        @lru_cache(maxsize, typed)
-        def inner_wrapper(ttl_period: int, *a, **k):
-            return func(*a, **k)
-
-        @wraps(func)
+        if async_:
+            @alru_cache(maxsize, typed)
+            async def inner_wrapper(ttl_period: int, /, *a, **k):
+                return await func(*a, **k)
+
+            @wraps(func)
+            async def wrapper(*a, **k):
+                nonlocal start_time
+                if not start_time:
+                    start_time = int(time.time())
+                return await inner_wrapper(math.floor((time.time() - start_time) // ttl), *a, **k)
+            return wrapper
+        else:
+            @lru_cache(maxsize, typed)
+            def inner_wrapper(ttl_period: int, /, *a, **k):
+                return func(*a, **k)
+
+            @wraps(func)
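
Taken together, the pieces added above compose as follows. This is a minimal sketch, not the author's documented API: the module path `suou.functools` is a guess (the diff viewer does not show file names), while `flat_args`, `alru_cache`, and `timed_cache(..., async_=True)` are taken directly from the code in this hunk.

import asyncio
from suou.functools import alru_cache, timed_cache, flat_args  # import path is assumed

# flat_args() turns a call signature into one hashable key; equal calls give equal keys.
assert flat_args((1, 2), {'a': 3}, False) == flat_args((1, 2), {'a': 3}, False)

@alru_cache(maxsize=32)
async def fetch(user_id: int) -> dict:
    await asyncio.sleep(0.1)         # stand-in for awaited I/O
    return {"id": user_id}

@timed_cache(ttl=5, async_=True)     # same idea, but entries expire every 5 seconds
async def fetch_fresh(user_id: int) -> dict:
    return await fetch(user_id)

async def main():
    await fetch(1)                   # miss: the coroutine runs
    await fetch(1)                   # hit: the result comes from the cache
    print(fetch.cache_info())        # CacheInfo(hits=1, misses=1, maxsize=32, currsize=1)
    print(await fetch_fresh(1))

asyncio.run(main())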


@@ -103,6 +103,27 @@ def addattr(obj: Any, /, name: str = None):
         return func
     return decorator

+class hashed_list(list):
+    """
+    Used by lru_cache() functions.
+
+    This class guarantees that hash() will be called no more than once
+    per element.  This is important because the lru_cache() will hash
+    the key multiple times on a cache miss.
+
+    Shamelessly copied from functools._HashedSeq() from the Python Standard Library.
+    Never trust underscores, you know.
+    """
+
+    __slots__ = 'hashvalue'
+
+    def __init__(self, tup, hash=hash):
+        self[:] = tup
+        self.hashvalue = hash(tup)
+
+    def __hash__(self):
+        return self.hashvalue
+
 __all__ = ('makelist', 'kwargs_prefix', 'ltuple', 'rtuple', 'additem', 'addattr')
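
The point of `hashed_list` is that the key's hash is computed once, at construction, and then reused on every cache lookup. A small illustration (the import path is taken from the `from suou.itertools import hashed_list` line added earlier in this commit):

from suou.itertools import hashed_list

key = hashed_list(('fetch', 1, 2))
assert hash(key) == hash(('fetch', 1, 2))   # hashvalue was precomputed in __init__
assert list(key) == ['fetch', 1, 2]         # otherwise it behaves as a plain list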