Coverage for adhoc-cicd-odoo-odoo / odoo / tools / cache.py: 34%

231 statements  

« prev     ^ index     » next       coverage.py v7.13.4, created at 2026-03-09 18:15 +0000

1# Part of Odoo. See LICENSE file for full copyright and licensing details. 

2# decorator makes wrappers that have the same API as their wrapped function 

3from __future__ import annotations 

4 

5from collections import defaultdict 

6from collections.abc import Mapping, Collection 

7from inspect import signature, Parameter 

8import functools 

9import logging 

10import signal 

11import sys 

12import threading 

13import time 

14import typing 

15import warnings 

16 

17if typing.TYPE_CHECKING: 

18 from .lru import LRU 

19 from collections.abc import Callable, Iterable 

20 from odoo.models import BaseModel 

21 C = typing.TypeVar('C', bound=Callable) 

22 

# Alias the builtin so that call sites explicitly acknowledge they are
# evaluating trusted, internally-generated code only.
unsafe_eval = eval

_logger = logging.getLogger(__name__)

# Serialize transitions of the stats-logging state machine (see
# log_ormcache_stats below): 'wait' -> 'run' -> 'wait', or 'run' -> 'abort'.
_logger_lock = threading.RLock()
_logger_state: typing.Literal['wait', 'abort', 'run'] = 'wait'

28 

29 

class ormcache_counter:
    """ Statistic counters for cache entries. """
    __slots__ = ['cache_name', 'err', 'gen_time', 'hit', 'miss', 'tx_err', 'tx_hit', 'tx_miss']

    def __init__(self):
        # process-lifetime counters
        self.hit: int = 0
        self.miss: int = 0
        self.err: int = 0
        self.gen_time: float = 0.0
        # name of the LRU cache holding the entries of the counted method
        self.cache_name: str = ''
        # counters restricted to the first lookup of a key per transaction
        self.tx_hit: int = 0
        self.tx_miss: int = 0
        self.tx_err: int = 0

    @property
    def ratio(self) -> float:
        """ Percentage of lookups served from the cache. """
        calls = self.hit + self.miss
        return 100.0 * self.hit / calls if calls else 0.0

    @property
    def tx_ratio(self) -> float:
        """ Percentage of first-in-transaction lookups served from the cache. """
        return 100.0 * self.tx_hit / self.tx_calls if self.tx_calls else 0.0

    @property
    def tx_calls(self) -> int:
        """ Total number of first-in-transaction lookups. """
        return self.tx_hit + self.tx_miss

55 

56 

# Statistic counters, keyed by (database name, decorated method); entries are
# created lazily on first access.
_COUNTERS: defaultdict[tuple[str, Callable], ormcache_counter] = defaultdict(ormcache_counter)
"""statistic counters dictionary, maps (dbname, method) to counter"""

59 

60 

class ormcache:
    """ LRU cache decorator for model methods.

    The positional parameters are strings: expressions evaluated against the
    signature of the decorated method, whose values make up the cache key::

        @ormcache('model_name', 'mode')
        def _compute_domain(self, model_name, mode="read"):
            ...

    For the sake of backward compatibility, the decorator also supports the
    named parameter `skiparg`::

        @ormcache(skiparg=1)
        def _compute_domain(self, model_name, mode="read"):
            ...

    Methods implementing this decorator should never return a Recordset,
    because the underlying cursor will eventually be closed and raise a
    `psycopg2.InterfaceError`.
    """
    # computes the cache key from the call arguments; set by determine_key()
    key: Callable[..., tuple]

    def __init__(self, *args: str, cache: str = 'default', skiparg: int | None = None, **kwargs):
        self.cache_name = cache
        self.args = args
        self.skiparg = skiparg
        if skiparg is not None:
            warnings.warn("Deprecated since 19.0, ormcache(skiparg) will be removed", DeprecationWarning)

    def __call__(self, method: C) -> C:
        assert not hasattr(self, 'method'), "ormcache is already bound to a method"
        self.method = method
        self.determine_key()
        assert self.key is not None, "ormcache.key not initialized"

        @functools.wraps(method)
        def lookup(*args, **kwargs):
            return self.lookup(*args, **kwargs)

        # expose the decorator instance so tooling can reach the cache/key machinery
        lookup.__cache__ = self  # type: ignore
        return lookup

    def add_value(self, *args, cache_value=None, **kwargs) -> None:
        """ Store `cache_value` under the key derived from the call arguments. """
        model: BaseModel = args[0]
        cache: LRU = model.pool._Registry__caches[self.cache_name]  # type: ignore
        cache[self.key(*args, **kwargs)] = cache_value

    def determine_key(self) -> None:
        """ Determine the function that computes a cache key from arguments. """
        assert self.method is not None
        if self.skiparg is not None:
            # backward-compatible key: skip the first `skiparg` positional arguments
            self.key = lambda *args, **kwargs: (args[0]._name, self.method, *args[self.skiparg:])
            return
        # Build the source of a lambda mirroring the method's signature
        # (annotations stripped, since lambdas cannot carry them) that returns
        # the tuple of key expressions, then evaluate that source.
        params = ', '.join(
            str(param.replace(annotation=Parameter.empty))
            for param in signature(self.method).parameters.values()
        )
        exprs = ['self._name', 'method', *self.args]
        body = ''.join(f'{expr},' for expr in exprs)
        self.key = unsafe_eval(f"lambda {params}: ({body})", {'method': self.method})

    def lookup(self, *args, **kwargs):
        """ Return the cached value for this call, computing and storing it on a miss. """
        model: BaseModel = args[0]
        cache: LRU = model.pool._Registry__caches[self.cache_name]  # type: ignore
        key = self.key(*args, **kwargs)
        counter = _COUNTERS[model.pool.db_name, self.method]

        # transaction-level stats: is this the first lookup of `key` in the transaction?
        tx_lookups = model.env.cr.cache.setdefault('_ormcache_lookups', set())
        first_in_tx = key not in tx_lookups
        if first_in_tx:
            counter.cache_name = self.cache_name
            tx_lookups.add(key)

        miss = None
        try:
            value = cache[key]
            counter.hit += 1
            counter.tx_hit += first_in_tx
            return value
        except KeyError:
            counter.miss += 1
            counter.tx_miss += first_in_tx
            miss = True
        except TypeError:
            # unhashable key: record the error and bypass the cache entirely
            _logger.warning("cache lookup error on %r", key, exc_info=True)
            counter.err += 1
            counter.tx_err += first_in_tx
            miss = False

        if not miss:
            return self.method(*args, **kwargs)
        started = time.monotonic()
        value = self.method(*args, **kwargs)
        counter.gen_time += time.monotonic() - started
        cache[key] = value
        return value

161 

162 

class ormcache_context(ormcache):
    """ This LRU cache decorator is a variant of :class:`ormcache`, with an
    extra parameter ``keys`` that defines a sequence of dictionary keys. Those
    keys are looked up in the ``context`` parameter and combined to the cache
    key made by :class:`ormcache`.
    """
    def __init__(self, *args: str, keys, skiparg=None, **kwargs):
        assert skiparg is None, "ormcache_context() no longer supports skiparg"
        warnings.warn("Since 19.0, use ormcache directly, context values are available as `self.env.context.get`", DeprecationWarning)
        super().__init__(*args, **kwargs)
        # BUG FIX: `keys` was accepted but never stored, so determine_key()
        # crashed with AttributeError on `self.keys` at decoration time.
        self.keys = keys

    def determine_key(self) -> None:
        """ Append the context-keys expression to the cache key, then delegate
        to :meth:`ormcache.determine_key`. """
        assert self.method is not None
        sign = signature(self.method)
        # read from the explicit `context` parameter when the method has one,
        # otherwise from the environment context
        cont_expr = "(context or {})" if 'context' in sign.parameters else "self.env.context"
        keys_expr = "tuple(%s.get(k) for k in %r)" % (cont_expr, self.keys)
        self.args += (keys_expr,)
        super().determine_key()

181 

182 

def log_ormcache_stats(sig=None, frame=None):  # noqa: ARG001 (arguments are there for signals)
    """ Log ormcache statistics, triggered by SIGUSR1 (counters only) or
    SIGUSR2 (counters plus memory-size estimation).

    Collection and logging happen in a separate thread to avoid blocking the
    main thread and to avoid using the logging module directly in the signal
    handler: https://docs.python.org/3/library/logging.html#thread-safety

    If a logging thread is already running, a second call asks it to abort
    instead of starting another one.
    """
    global _logger_state  # noqa: PLW0603
    with _logger_lock:
        if _logger_state != 'wait':
            # send the signal again to stop the logging thread
            _logger_state = 'abort'
            return
        _logger_state = 'run'

    def check_continue_logging():
        # cooperative cancellation point for the collection thread
        if _logger_state == 'run':
            return True
        _logger.info('Stopping logging ORM cache stats')
        return False

    class StatsLine:
        """ Accumulator for one (method, counter) row of the report. """
        def __init__(self, method, counter: ormcache_counter):
            self.sz_entries_sum: int = 0
            self.sz_entries_max: int = 0
            self.nb_entries: int = 0
            self.counter = counter
            self.method = method

    def _log_ormcache_stats():
        """ Log statistics of ormcache usage by database, model, and method. """
        from odoo.modules.registry import Registry  # noqa: PLC0415
        try:
            # {dbname: {method: StatsLine}}
            cache_stats: defaultdict[str, dict[Callable, StatsLine]] = defaultdict(dict)
            # {dbname: (cache_name, entries, count, total_size)}
            cache_usage: defaultdict[str, list[tuple[str, int, int, int]]] = defaultdict(list)

            # browse the values in cache
            registries = Registry.registries.snapshot
            class_slots = {}  # shared memo for get_cache_size()
            for i, (dbname, registry) in enumerate(registries.items(), start=1):
                if not check_continue_logging():
                    return
                _logger.info("Processing database %s (%d/%d)", dbname, i, len(registries))
                db_cache_stats = cache_stats[dbname]
                db_cache_usage = cache_usage[dbname]
                for cache_name, cache in registry._Registry__caches.items():
                    cache_total_size = 0
                    for cache_key, cache_value in cache.snapshot.items():
                        method = cache_key[1]
                        stats = db_cache_stats.get(method)
                        if stats is None:
                            stats = db_cache_stats[method] = StatsLine(method, _COUNTERS[dbname, method])
                        stats.nb_entries += 1
                        if not show_size:
                            continue
                        size = get_cache_size((cache_key, cache_value), cache_info=method.__qualname__, class_slots=class_slots)
                        cache_total_size += size
                        stats.sz_entries_sum += size
                        stats.sz_entries_max = max(stats.sz_entries_max, size)
                    db_cache_usage.append((cache_name, len(cache), cache.count, cache_total_size))

            # add counters that have no values in cache
            for (dbname, method), counter in _COUNTERS.copy().items():  # copy to avoid concurrent modification
                if not check_continue_logging():
                    return
                db_cache_stats = cache_stats[dbname]
                if db_cache_stats.get(method) is None:
                    db_cache_stats[method] = StatsLine(method, counter)

            # Output the stats
            log_msgs = ['Caches stats:']
            size_column_info = (
                f"{'Memory %':>10},"
                f"{'Memory SUM':>12},"
                f"{'Memory MAX':>12},"
            ) if show_size else ''
            column_info = (
                f"{'Cache Name':>25},"
                f"{'Entry':>7},"
                f"{size_column_info}"
                f"{'Hit':>6},"
                f"{'Miss':>6},"
                f"{'Err':>6},"
                f"{'Gen Time [s]':>13},"
                f"{'Hit Ratio':>10},"
                f"{'TX Hit Ratio':>13},"
                f"{'TX Call':>8},"
                " Method"
            )

            for dbname, db_cache_stats in sorted(cache_stats.items(), key=lambda k: k[0] or '~'):
                if not check_continue_logging():
                    return
                log_msgs.append(f'Database {dbname or "<no_db>"}:')
                # BUG FIX: this previously iterated the stale `db_cache_usage`
                # loop variable left over from the collection phase (i.e. the
                # last database processed, or undefined when no registry was
                # loaded); report the usage rows of the database at hand.
                log_msgs.extend(
                    f" * {cache_name}: {entries}/{count}{' (' if cache_total_size else ''}{cache_total_size}{' bytes)' if cache_total_size else ''}"
                    for cache_name, entries, count, cache_total_size in cache_usage[dbname]
                )
                log_msgs.append('Details:')

                # sort by -sz_entries_sum and method_name
                db_cache_stat = sorted(db_cache_stats.items(), key=lambda k: (-k[1].sz_entries_sum, k[0].__name__))
                sz_entries_all = sum(stat.sz_entries_sum for _, stat in db_cache_stat)
                log_msgs.append(column_info)
                for method, stat in db_cache_stat:
                    size_data = (
                        f'{stat.sz_entries_sum / (sz_entries_all or 1) * 100:9.1f}%,'
                        f'{stat.sz_entries_sum:12d},'
                        f'{stat.sz_entries_max:12d},'
                    ) if show_size else ''
                    log_msgs.append(
                        f'{stat.counter.cache_name:>25},'
                        f'{stat.nb_entries:7d},'
                        f'{size_data}'
                        f'{stat.counter.hit:6d},'
                        f'{stat.counter.miss:6d},'
                        f'{stat.counter.err:6d},'
                        f'{stat.counter.gen_time:13.3f},'
                        f'{stat.counter.ratio:9.1f}%,'
                        f'{stat.counter.tx_ratio:12.1f}%,'
                        f'{stat.counter.tx_calls:8d},'
                        f' {method.__qualname__}'
                    )
            _logger.info('\n'.join(log_msgs))
        except Exception:  # noqa: BLE001
            # BUG FIX: Logger.exception() requires a message argument; calling
            # it with none raised a TypeError inside the error handler itself.
            _logger.exception('Error while logging ormcache stats')
        finally:
            global _logger_state  # noqa: PLW0603
            with _logger_lock:
                _logger_state = 'wait'

    show_size = False
    if sig == signal.SIGUSR1:
        threading.Thread(target=_log_ormcache_stats,
                         name="odoo.signal.log_ormcache_stats").start()
    elif sig == signal.SIGUSR2:
        show_size = True
        threading.Thread(target=_log_ormcache_stats,
                         name="odoo.signal.log_ormcache_stats_with_size").start()
    else:
        # BUG FIX: with an unrecognized trigger no thread runs the `finally`
        # reset above, leaving the state stuck on 'run' and turning every
        # subsequent call into a no-op abort; release the state here.
        with _logger_lock:
            _logger_state = 'wait'

322 

323 

def get_cache_key_counter(bound_method: Callable, *args, **kwargs) -> tuple[LRU, tuple, ormcache_counter]:
    """ Return the cache, key and stat counter for the given call.

    `bound_method` must be a method decorated with :class:`ormcache` (the
    decorator instance is retrieved through its ``__cache__`` attribute).
    """
    model: BaseModel = bound_method.__self__  # type: ignore
    cache_decorator: ormcache = bound_method.__cache__  # type: ignore
    cache: LRU = model.pool._Registry__caches[cache_decorator.cache_name]  # type: ignore
    key = cache_decorator.key(model, *args, **kwargs)
    counter = _COUNTERS[model.pool.db_name, cache_decorator.method]
    return cache, key, counter

332 

333 

def get_cache_size(
    obj,
    *,
    cache_info: str = '',
    seen_ids: set[int] | None = None,
    class_slots: dict[int, Iterable[str]] | None = None
) -> int:
    """ A non-thread-safe recursive object size estimator.

    :param obj: root object whose transitive size is estimated
    :param cache_info: when non-empty, the qualified name of the cached method;
        used to report (and skip) recordsets/environments found in the cache
    :param seen_ids: ids of objects already counted, shared across calls
    :param class_slots: memo mapping ``id(cls)`` to the combined ``__slots__``
        attribute names of ``cls`` and its bases, shared across calls
    :return: estimated size in bytes (shallow sizes summed over reachable objects)
    """
    from odoo.models import BaseModel  # noqa: PLC0415
    from odoo.api import Environment  # noqa: PLC0415

    if seen_ids is None:
        # count internal constants as 0 bytes
        seen_ids = set(map(id, (None, False, True)))
    if class_slots is None:
        class_slots = {}  # {class_id: combined_slots}
    total_size = 0
    objects = [obj]

    while objects:
        cur_obj = objects.pop()
        if id(cur_obj) in seen_ids:
            continue

        if cache_info and isinstance(cur_obj, (BaseModel, Environment)):
            # recordsets/environments hold a cursor and must never be cached
            _logger.error('%s is cached by %s', cur_obj, cache_info)
            continue

        seen_ids.add(id(cur_obj))
        total_size += sys.getsizeof(cur_obj)

        if hasattr(cur_obj, '__slots__'):
            cur_obj_cls = type(cur_obj)
            attributes = class_slots.get(id(cur_obj_cls))
            if attributes is None:
                # BUG FIX: emulate Python's private-name mangling accurately:
                # dunder names (leading AND trailing underscores) are never
                # mangled, and leading underscores of the class name are
                # stripped when building the mangled name.
                class_slots[id(cur_obj_cls)] = attributes = tuple({
                    f"_{cls.__name__.lstrip('_')}{attr}"
                    if attr.startswith('__') and not attr.endswith('__')
                    else attr
                    for cls in cur_obj_cls.mro()
                    for attr in getattr(cls, '__slots__', ())
                })
            objects.extend(getattr(cur_obj, attr, None) for attr in attributes)
        if hasattr(cur_obj, '__dict__'):
            # BUG FIX: this previously appended `object.__dict__` (the builtin
            # class's mappingproxy, identical every iteration), so instance
            # attributes were never counted at all.
            objects.append(cur_obj.__dict__)

        if isinstance(cur_obj, Mapping):
            objects.extend(cur_obj.values())
            objects.extend(cur_obj.keys())
        elif isinstance(cur_obj, Collection) and not isinstance(cur_obj, (str, bytes, bytearray)):
            objects.extend(cur_obj)

    return total_size