Coverage for adhoc-cicd-odoo-odoo / odoo / tools / profiler.py: 16%

484 statements  

« prev     ^ index     » next       coverage.py v7.13.4, created at 2026-03-09 18:05 +0000

1# Part of Odoo. See LICENSE file for full copyright and licensing details. 

2 

3from contextlib import nullcontext, ExitStack 

4from datetime import datetime 

5import json 

6import logging 

7import sys 

8import time 

9import threading 

10import re 

11import tracemalloc 

12 

13from psycopg2 import OperationalError 

14 

15from odoo import tools 

16from odoo.tools import SQL 

17 

18from .gc import disabling_gc 

19 

20 

# module-level logger named after this module's dotted path
_logger = logging.getLogger(__name__)

22 

# ensure we have a non patched time for profiling times when using freezegun
# Binding the `__call__` method-wrapper of the *current* function objects keeps
# a direct reference to the real implementations, so profiling timestamps stay
# correct even if `time.time`/`time.thread_time` are later monkey-patched
# (e.g. by freezegun). `datetime.now` is aliased for the same reason.
real_datetime_now = datetime.now
real_time = time.time.__call__
real_cpu_time = time.thread_time.__call__

27 

28 

29def _format_frame(frame): 

30 code = frame.f_code 

31 return (code.co_filename, frame.f_lineno, code.co_name, '') 

32 

33 

34def _format_stack(stack): 

35 return [list(frame) for frame in stack] 

36 

37 

def get_current_frame(thread=None):
    """ Return the innermost frame of *thread* (or of the caller when no
    thread is given), skipping every frame that belongs to this file.

    :param thread: optional ``threading.Thread`` whose ident must appear in
        ``sys._current_frames()`` (raises ``KeyError`` otherwise).
    """
    if thread is not None:
        frame = sys._current_frames()[thread.ident]
    else:
        frame = sys._getframe()
    # walk out of the profiler's own frames so the caller's code is on top
    while frame.f_code.co_filename == __file__:
        frame = frame.f_back
    return frame

46 

47 

def _get_stack_trace(frame, limit_frame=None):
    """ Walk the frame chain starting at *frame* and return the formatted
    frames, outermost first.

    :param limit_frame: optional frame at which to stop (excluded); if given
        but never reached, a runbot-level message is logged.
    """
    frames = []
    while frame is not None and frame != limit_frame:
        frames.append(_format_frame(frame))
        frame = frame.f_back
    if limit_frame and frame is None:
        # `_logger.runbot` is an Odoo-specific log level
        _logger.runbot("Limit frame was not found")
    frames.reverse()
    return frames

56 

57 

def stack_size():
    """ Return the depth of the current call stack, not counting the frames
    that belong to this file (skipped by ``get_current_frame``). """
    depth = 0
    frame = get_current_frame()
    while frame is not None:
        depth += 1
        frame = frame.f_back
    return depth

65 

66 

def make_session(name=''):
    """ Build a default profiling session label from the current (non-patched)
    timestamp and an optional name. """
    now = real_datetime_now()
    return f'{now:%Y-%m-%d %H:%M:%S} {name}'

69 

70 

def force_hook():
    """
    Force periodic profiling collectors to generate some stack trace. This is
    useful before long calls that do not release the GIL, so that the time
    spent in those calls is attributed to a specific stack trace, instead of
    some arbitrary former frame.
    """
    hooks = getattr(threading.current_thread(), 'profile_hooks', ())
    for hook in hooks:
        hook()

81 

82 

class Collector:
    """
    Base class for objects that collect profiling data.

    A collector object is used by a profiler to collect profiling data, most
    likely a list of stack traces with time and some context information added
    by ExecutionContext decorator on current thread.

    This is a generic implementation of a basic collector, to be inherited.
    It defines default behaviors for creating an entry in the collector.
    """
    name = None                 # symbolic name of the collector
    _store = name               # ir_profile column to store entries in (None here;
                                # subclasses may set 'others' for the json blob column)
    _registry = {}              # map collector names to their class

    @classmethod
    def __init_subclass__(cls):
        # register every subclass under both its symbolic name and class name
        # so `Collector.make('sql')` and `Collector.make('SQLCollector')` work
        if cls.name:
            cls._registry[cls.name] = cls
        cls._registry[cls.__name__] = cls

    @classmethod
    def make(cls, name, *args, **kwargs):
        """ Instantiate a collector corresponding to the given name. """
        return cls._registry[name](*args, **kwargs)

    def __init__(self):
        self._processed = False   # whether `entries` has already post-processed
        self._entries = []        # raw collected entries (None after processing)
        self.profiler = None      # set by Profiler.__init__ after instantiation

    def start(self):
        """ Start the collector. """

    def stop(self):
        """ Stop the collector. """

    def add(self, entry=None, frame=None):
        """ Add an entry (dict) to this collector: the current stack trace,
        the thread's execution context and a timestamp, optionally overridden
        by *entry*. """
        self._entries.append({
            'stack': self._get_stack_trace(frame),
            'exec_context': getattr(self.profiler.init_thread, 'exec_context', ()),
            'start': real_time(),
            **(entry or {}),
        })

    def progress(self, entry=None, frame=None):
        """ Checks if the limits were met and add to the entries """
        if self.profiler.entry_count_limit \
                and self.profiler.counter >= self.profiler.entry_count_limit:
            # limit reached: end the whole profiling session instead of adding
            self.profiler.end()
            return
        self.profiler.counter += 1
        self.add(entry=entry, frame=frame)

    def _get_stack_trace(self, frame=None):
        """ Return the stack trace to be included in a given entry. """
        frame = frame or get_current_frame(self.profiler.init_thread)
        return _get_stack_trace(frame, self.profiler.init_frame)

    def post_process(self):
        # fill in the source lines of every collected stack trace
        for entry in self._entries:
            stack = entry.get('stack', [])
            self.profiler._add_file_lines(stack)

    @property
    def entries(self):
        """ Return the entries of the collector after postprocessing. """
        if not self._processed:
            self.post_process()
            self.processed_entries = self._entries
            self._entries = None  # avoid modification after processing
            self._processed = True
        return self.processed_entries

    def summary(self):
        # Read the processed entries once the `entries` property has consumed
        # the raw list (it sets _entries to None); previously this raised
        # TypeError (len(None)) when summary() ran after post-processing,
        # which happens whenever Profiler saved to db with log=True.
        entries = self.processed_entries if self._processed else self._entries
        return f"{'='*10} {self.name} {'='*10} \n Entries: {len(entries)}"

160 

161 

class SQLCollector(Collector):
    """
    Saves all executed queries in the current thread with the call stack.
    """
    name = 'sql'

    def start(self):
        # register on the profiled thread's query hooks; presumably invoked
        # by the database cursor after each query -- confirm against sql_db
        init_thread = self.profiler.init_thread
        if not hasattr(init_thread, 'query_hooks'):
            init_thread.query_hooks = []
        init_thread.query_hooks.append(self.hook)

    def stop(self):
        self.profiler.init_thread.query_hooks.remove(self.hook)

    def hook(self, cr, query, params, query_start, query_time):
        """ Record one executed query with its fully-formatted SQL and timing. """
        self.progress({
            'query': str(query),
            'full_query': str(cr._format(query, params)),
            'start': query_start,
            'time': query_time,
        })

    def summary(self):
        # use the processed entries when the `entries` property already
        # consumed the raw list (it sets _entries to None)
        entries = self.processed_entries if self._processed else self._entries
        # `or 1` avoids a ZeroDivisionError when all query times are 0
        total_time = sum(entry['time'] for entry in entries) or 1
        sql_entries = ''
        for entry in entries:
            # separator line, then "<time> <bar proportional to time share>",
            # then the full query. The stray quote characters that previously
            # surrounded the newlines (f"...'\n'...") were removed: they
            # injected literal `'` characters into the report.
            sql_entries += f"\n{'-' * 100}\n{entry['time']} {'*' * int(entry['time'] / total_time * 100)}\n{entry['full_query']}"
        return super().summary() + sql_entries

191 

192 

class _BasePeriodicCollector(Collector):
    """
    Record execution frames asynchronously at most every `interval` seconds.

    A dedicated thread wakes up every `frame_interval` seconds and calls
    ``progress()``. The collector also registers ``progress`` in the profiled
    thread's ``profile_hooks`` so that ``force_hook()`` can request an
    on-demand sample.

    :param interval (float): time to wait in seconds between two samples.
    """
    _min_interval = 0.001  # minimum interval allowed
    _max_interval = 5  # maximum interval allowed
    _default_interval = 0.001  # used when no interval is given

    def __init__(self, interval=None):  # check duration. dynamic?
        super().__init__()
        self.active = False
        self.frame_interval = interval or self._default_interval
        self.__thread = threading.Thread(target=self.run)
        self.last_frame = None
        self._stop_event = threading.Event()  # lets stop() interrupt the sleep immediately

    def start(self):
        # a '<name>_interval' profiler param overrides the constructor value,
        # clamped to [_min_interval, _max_interval]
        interval = self.profiler.params.get(f'{self.name}_interval')
        if interval:
            self.frame_interval = min(max(float(interval), self._min_interval), self._max_interval)
        init_thread = self.profiler.init_thread
        if not hasattr(init_thread, 'profile_hooks'):
            init_thread.profile_hooks = []
        init_thread.profile_hooks.append(self.progress)
        self.__thread.start()

    def run(self):
        # sampling loop, executed in the dedicated collector thread
        self.active = True
        # NOTE(review): `last_time` appears to be unused -- PeriodicCollector.add()
        # reads/writes `_last_time` (with underscore) instead; confirm whether
        # this assignment is vestigial.
        self.last_time = real_time()
        while self.active:  # maybe add a check on parent_thread state?
            self.progress()
            self._stop_event.wait(self.frame_interval)

    def stop(self):
        self.active = False
        self._stop_event.set()
        self._entries.append({'stack': [], 'start': real_time()})  # add final end frame
        # join unless stop() runs on the sampling thread itself (which happens
        # when progress() hits the entry count limit and calls profiler.end())
        if self.__thread.is_alive() and self.__thread is not threading.current_thread():
            self.__thread.join()
        self.profiler.init_thread.profile_hooks.remove(self.progress)

235 

236 

class PeriodicCollector(_BasePeriodicCollector):
    """ Asynchronous sampling collector: periodically records the stack trace
    of the profiled thread. """

    name = 'traces_async'

    def add(self, entry=None, frame=None):
        """ Add an entry (dict) to this collector. """
        if self.last_frame:
            duration = real_time() - self._last_time
            if duration > self.frame_interval * 10 and self.last_frame:
                # The profiler has unexpectedly slept for more than 10 frame intervals. This may
                # happen when calling a C library without releasing the GIL. In that case, the
                # last frame was taken before the call, and the next frame is after the call, and
                # the call itself does not appear in any of those frames: the duration of the call
                # is incorrectly attributed to the last frame.
                self._entries[-1]['stack'].append(('profiling', 0, '⚠ Profiler freezed for %s s' % duration, ''))
                self.last_frame = None  # skip duplicate detection for the next frame.
        self._last_time = real_time()

        frame = frame or get_current_frame(self.profiler.init_thread)
        if frame == self.last_frame:
            # don't save if the frame is exactly the same as the previous one.
            # maybe modify the last entry to add a last seen?
            return
        self.last_frame = frame
        super().add(entry=entry, frame=frame)

262 

263 

# serializes use of the process-global tracemalloc machinery: MemoryCollector
# holds this lock for its whole profiling session (acquired in start(),
# released in stop())
_lock = threading.Lock()

265 

266 

class MemoryCollector(_BasePeriodicCollector):
    """ Periodically record ``tracemalloc`` snapshots.

    ``tracemalloc`` is process-global, so the module-level ``_lock`` is held
    for the whole profiling session to keep concurrent MemoryCollectors from
    interfering with each other.
    """

    name = 'memory'
    _store = 'others'  # stored in the ir_profile 'others' json column
    _min_interval = 0.01  # minimum interval allowed
    _default_interval = 1

    def start(self):
        # hold the lock for the whole session: only one collector may drive
        # the process-global tracemalloc at a time
        _lock.acquire()
        tracemalloc.start()
        super().start()

    def add(self, entry=None, frame=None):
        """ Add an entry (dict) to this collector: a timestamped tracemalloc
        snapshot (no stack/exec_context, unlike the base implementation). """
        self._entries.append({
            'start': real_time(),
            'memory': tracemalloc.take_snapshot(),
        })

    def stop(self):
        super().stop()
        # Stop tracemalloc *before* releasing the lock. The previous order
        # (release, then stop) allowed another MemoryCollector to acquire the
        # lock and call tracemalloc.start(), which this thread's late
        # tracemalloc.stop() would then kill, corrupting the other session.
        tracemalloc.stop()
        _lock.release()

    def post_process(self):
        # replace the raw Snapshot objects with JSON-serializable dicts of
        # traceback frames and allocation sizes (entries are json.dumps'ed
        # by Profiler.end)
        for i, entry in enumerate(self._entries):
            if entry.get("memory", False):
                entry_statistics = entry["memory"].statistics('traceback')
                modified_entry_statistics = [{'traceback': list(statistic.traceback._frames),
                                              'size': statistic.size} for statistic in entry_statistics]
                self._entries[i] = {"memory_tracebacks": modified_entry_statistics, "start": entry['start']}

298 

299 

class SyncCollector(Collector):
    """
    Record complete execution synchronously.
    Note that --limit-memory-hard may need to be increased when launching Odoo.
    """
    name = 'traces_sync'

    def start(self):
        # refuse to silently clobber an existing trace function (debugger,
        # coverage, ...) -- log it, as only one settrace hook can be active
        if sys.gettrace() is not None:
            _logger.error("Cannot start SyncCollector, settrace already set: %s", sys.gettrace())
        assert not self._processed, "You cannot start SyncCollector after accessing entries."
        sys.settrace(self.hook)  # todo test setprofile, but maybe not multithread safe

    def stop(self):
        sys.settrace(None)

    def hook(self, _frame, event, _arg=None):
        # global trace function (sys.settrace protocol); 'line' events are
        # skipped to keep the volume of entries manageable
        if event == 'line':
            return
        entry = {'event': event, 'frame': _format_frame(_frame)}
        if event == 'call' and _frame.f_back:
            # we need the parent frame to determine the line number of the call
            entry['parent_frame'] = _format_frame(_frame.f_back)
        self.progress(entry, frame=_frame)
        # returning the hook keeps tracing enabled in the new local scope
        return self.hook

    def _get_stack_trace(self, frame=None):
        # Getting the full stack trace is slow, and not useful in this case.
        # SyncCollector only saves the top frame and event at each call and
        # recomputes the complete stack at the end.
        return None

    def post_process(self):
        # Transform the evented traces to full stack traces. This processing
        # could be avoided since speedscope will transform that back to
        # evented anyway, but it is actually simpler to integrate into the
        # current speedscope logic, especially when mixed with SQLCollector.
        # We could improve it by saving as evented and manage it later.
        stack = []
        for entry in self._entries:
            frame = entry.pop('frame')
            event = entry.pop('event')
            if event == 'call':
                if stack:
                    # refine the caller frame with the call-site line number
                    stack[-1] = entry.pop('parent_frame')
                stack.append(frame)
            elif event == 'return':
                stack.pop()
            entry['stack'] = stack[:]
        super().post_process()

350 

351 

class QwebTracker():
    """ Emit qweb rendering events ('render'/'enter'/'leave') to the hooks
    registered on the current thread (see QwebCollector.start), and, when the
    'execution_context_qweb' profiler param is set, wrap each directive in an
    ExecutionContext so other collectors can label their stacks with it. """

    def __init__(self, view_id, arch, cr):
        current_thread = threading.current_thread()  # don't store current_thread on self
        self.execution_context_enabled = getattr(current_thread, 'profiler_params', {}).get('execution_context_qweb')
        self.qweb_hooks = getattr(current_thread, 'qweb_hooks', ())
        self.context_stack = []  # open ExecutionContexts, popped in leave_directive
        self.cr = cr
        self.view_id = view_id
        for hook in self.qweb_hooks:
            hook('render', self.cr.sql_log_count, view_id=view_id, arch=arch)

    def enter_directive(self, directive, attrib, xpath):
        """ Signal that rendering enters a qweb directive; collects the
        directive's relevant t-* attributes (as reprs) for display. """
        execution_context = None
        if self.execution_context_enabled:
            directive_info = {}
            if ('t-' + directive) in attrib:
                directive_info['t-' + directive] = repr(attrib['t-' + directive])
            if directive == 'set':
                if 't-value' in attrib:
                    directive_info['t-value'] = repr(attrib['t-value'])
                if 't-valuef' in attrib:
                    directive_info['t-valuef'] = repr(attrib['t-valuef'])

                for key in attrib:
                    if key.startswith('t-set-') or key.startswith('t-setf-'):
                        directive_info[key] = repr(attrib[key])
            elif directive == 'foreach':
                directive_info['t-as'] = repr(attrib['t-as'])
            elif directive == 'groups' and 'groups' in attrib and not directive_info.get('t-groups'):
                directive_info['t-groups'] = repr(attrib['groups'])
            elif directive == 'att':
                for key in attrib:
                    if key.startswith('t-att-') or key.startswith('t-attf-'):
                        directive_info[key] = repr(attrib[key])
            elif directive == 'options':
                for key in attrib:
                    if key.startswith('t-options-'):
                        directive_info[key] = repr(attrib[key])
            elif ('t-' + directive) not in attrib:
                # directive with no matching attribute: record its name only
                directive_info['t-' + directive] = None

            # entered manually (not via `with`) because enter/exit happen in
            # two different methods; the matching __exit__ is in leave_directive
            execution_context = tools.profiler.ExecutionContext(**directive_info, xpath=xpath)
            execution_context.__enter__()
            self.context_stack.append(execution_context)

        for hook in self.qweb_hooks:
            hook('enter', self.cr.sql_log_count, view_id=self.view_id, xpath=xpath, directive=directive, attrib=attrib)

    def leave_directive(self, directive, attrib, xpath):
        """ Signal that rendering leaves the most recently entered directive. """
        if self.execution_context_enabled:
            # close the ExecutionContext opened by the matching enter_directive
            self.context_stack.pop().__exit__()

        for hook in self.qweb_hooks:
            hook('leave', self.cr.sql_log_count, view_id=self.view_id, xpath=xpath, directive=directive, attrib=attrib)

407 

408 

class QwebCollector(Collector):
    """
    Record qweb execution with directive trace.

    Events are appended by QwebTracker through the ``hook`` closure and turned
    into per-directive timing/query statistics in ``post_process``.
    """
    name = 'qweb'

    def __init__(self):
        super().__init__()
        self.events = []  # raw (event, kwargs, sql_log_count, timestamp) tuples

        # closure instead of a bound method so each collector instance owns a
        # distinct hook object (needed for list remove() in stop())
        def hook(event, sql_log_count, **kwargs):
            self.events.append((event, kwargs, sql_log_count, real_time()))
        self.hook = hook

    def _get_directive_profiling_name(self, directive, attrib):
        """ Build the display label of a directive from its t-* attributes;
        an empty string means the directive is not worth profiling. """
        expr = ''
        if directive == 'set':
            if 't-set' in attrib:
                expr = f"t-set={repr(attrib['t-set'])}"
                if 't-value' in attrib:
                    expr += f" t-value={repr(attrib['t-value'])}"
                if 't-valuef' in attrib:
                    expr += f" t-valuef={repr(attrib['t-valuef'])}"
            for key in attrib:
                if key.startswith('t-set-') or key.startswith('t-setf-'):
                    if expr:
                        expr += ' '
                    expr += f"{key}={repr(attrib[key])}"
        elif directive == 'foreach':
            expr = f"t-foreach={repr(attrib['t-foreach'])} t-as={repr(attrib['t-as'])}"
        elif directive == 'options':
            if attrib.get('t-options'):
                expr = f"t-options={repr(attrib['t-options'])}"
            for key in attrib:
                if key.startswith('t-options-'):
                    expr = f"{expr} {key}={repr(attrib[key])}"
        elif directive == 'att':
            for key in attrib:
                if key == 't-att' or key.startswith('t-att-') or key.startswith('t-attf-'):
                    if expr:
                        expr += ' '
                    expr += f"{key}={repr(attrib[key])}"
        elif ('t-' + directive) in attrib:
            expr = f"t-{directive}={repr(attrib['t-' + directive])}"
        else:
            expr = f"t-{directive}"

        return expr

    def start(self):
        # register the hook on the profiled thread so QwebTracker finds it
        init_thread = self.profiler.init_thread
        if not hasattr(init_thread, 'qweb_hooks'):
            init_thread.qweb_hooks = []
        init_thread.qweb_hooks.append(self.hook)

    def stop(self):
        self.profiler.init_thread.qweb_hooks.remove(self.hook)

    def post_process(self):
        # replay the recorded event stream, attributing elapsed time and query
        # counts to the directive currently on top of the enter/leave stack
        last_event_query = None
        last_event_time = None
        stack = []     # currently-open directives (enter without leave yet)
        results = []   # all directive stats, in enter order
        archs = {}     # view_id -> rendered arch, from 'render' events
        # NOTE: the loop variable `time` shadows the module-level `time` import
        # within this method
        for event, kwargs, sql_count, time in self.events:
            if event == 'render':
                archs[kwargs['view_id']] = kwargs['arch']
                continue

            # update the active directive with the elapsed time and queries
            if stack:
                top = stack[-1]
                top['delay'] += time - last_event_time
                top['query'] += sql_count - last_event_query
            last_event_time = time
            last_event_query = sql_count

            directive = self._get_directive_profiling_name(kwargs['directive'], kwargs['attrib'])
            if directive:
                if event == 'enter':
                    data = {
                        'view_id': kwargs['view_id'],
                        'xpath': kwargs['xpath'],
                        'directive': directive,
                        'delay': 0,
                        'query': 0,
                    }
                    results.append(data)
                    stack.append(data)
                else:
                    assert event == "leave"
                    data = stack.pop()

        self.add({'results': {'archs': archs, 'data': results}})
        super().post_process()

504 

505 

class ExecutionContext:
    """
    Add some context on thread at current call stack level.
    This context stored by collector beside stack and is used by Speedscope
    to add a level to the stack with this information.
    """
    def __init__(self, **context):
        self.context = context
        self.previous_context = None

    def __enter__(self):
        thread = threading.current_thread()
        previous = getattr(thread, 'exec_context', ())
        # remember the previous value so __exit__ can restore it
        self.previous_context = previous
        # append (stack depth, context) so collectors know at which level
        # of the stack this context applies
        thread.exec_context = (*previous, (stack_size(), self.context))

    def __exit__(self, *_args):
        threading.current_thread().exec_context = self.previous_context

523 

524 

class Profiler:
    """
    Context manager to use to start the recording of some execution.
    Will save sql and async stack trace by default.
    """
    def __init__(self, collectors=None, db=..., profile_session=None,
                 description=None, disable_gc=False, params=None, log=False):
        """
        :param db: database name to use to save results.
            Will try to define database automatically by default.
            Use value ``None`` to not save results in a database.
        :param collectors: list of string and Collector object Ex: ['sql', PeriodicCollector(interval=0.2)]. Use `None` for default collectors
        :param profile_session: session description to use to reproup multiple profile. use make_session(name) for default format.
        :param description: description of the current profiler Suggestion: (route name/test method/loading module, ...)
        :param disable_gc: flag to disable gc durring profiling (usefull to avoid gc while profiling, especially during sql execution)
        :param params: parameters usable by collectors (like frame interval)
        :param log: if set, log a summary of the profiling when it ends
        """
        self.start_time = 0
        self.duration = 0
        self.start_cpu_time = 0
        self.cpu_duration = 0
        self.profile_session = profile_session or make_session()
        self.description = description
        self.init_frame = None
        self.init_stack_trace = None
        self.init_thread = None
        self.disable_gc = disable_gc
        self.filecache = {}  # filename -> list of source lines (or None)
        self.params = params or {}  # custom parameters usable by collectors
        self.profile_id = None
        self.log = log
        self.sub_profilers = []
        self.entry_count_limit = int(self.params.get("entry_count_limit", 0))  # the limit could be set using a smarter way
        self.done = False  # guards against double end() (e.g. entry limit + __exit__)
        self.exit_stack = ExitStack()
        self.counter = 0  # total entries added, checked against entry_count_limit

        if db is ...:
            # determine database from current thread
            db = getattr(threading.current_thread(), 'dbname', None)
            if not db:
                # only raise if path is not given and db is not explicitely disabled
                raise Exception('Database name cannot be defined automaticaly. \n Please provide a valid/falsy dbname or path parameter')
        self.db = db

        # collectors
        if collectors is None:
            collectors = ['sql', 'traces_async']
        self.collectors = []
        for collector in collectors:
            if isinstance(collector, str):
                try:
                    collector = Collector.make(collector)
                except Exception:
                    _logger.error("Could not create collector with name %r", collector)
                    continue
            collector.profiler = self
            self.collectors.append(collector)

    def __enter__(self):
        self.init_thread = threading.current_thread()
        try:
            self.init_frame = get_current_frame(self.init_thread)
            self.init_stack_trace = _get_stack_trace(self.init_frame)
        except KeyError:
            # when using thread pools (gevent) the thread won't exist in the current_frames
            # this case is managed by http.py but will still fail when adding a profiler
            # inside a piece of code that may be called by a longpolling route.
            # in this case, avoid crashing the caller and disable all collectors
            self.init_frame = self.init_stack_trace = self.collectors = []
            self.db = self.params = None
            message = "Cannot start profiler, thread not found. Is the thread part of a thread pool?"
            if not self.description:
                self.description = message
            _logger.warning(message)

        if self.description is None:
            # default description: the calling function and its location
            frame = self.init_frame
            code = frame.f_code
            self.description = f"{frame.f_code.co_name} ({code.co_filename}:{frame.f_lineno})"
        if self.params:
            # expose params on the thread for code that cannot reach the
            # profiler object (e.g. QwebTracker reads profiler_params)
            self.init_thread.profiler_params = self.params
        if self.disable_gc:
            self.exit_stack.enter_context(disabling_gc())
        self.start_time = real_time()
        self.start_cpu_time = real_cpu_time()
        for collector in self.collectors:
            collector.start()
        return self

    def __exit__(self, *args):
        self.end()

    def end(self):
        """ Stop all collectors and save the profile; idempotent, so it may be
        called early (entry count limit) and again from __exit__. """
        if self.done:
            return
        self.done = True
        try:
            for collector in self.collectors:
                collector.stop()
            self.duration = real_time() - self.start_time
            self.cpu_duration = real_cpu_time() - self.start_cpu_time
            self._add_file_lines(self.init_stack_trace)

            if self.db:
                # pylint: disable=import-outside-toplevel
                from odoo.sql_db import db_connect  # only import from odoo if/when needed.
                with db_connect(self.db).cursor() as cr:
                    values = {
                        "name": self.description,
                        "session": self.profile_session,
                        "create_date": real_datetime_now(),
                        "init_stack_trace": json.dumps(_format_stack(self.init_stack_trace)),
                        "duration": self.duration,
                        "cpu_duration": self.cpu_duration,
                        "entry_count": self.entry_count(),
                        "sql_count": sum(len(collector.entries) for collector in self.collectors if collector.name == 'sql'),
                    }
                    others = {}
                    for collector in self.collectors:
                        if collector.entries:
                            if collector._store == "others":
                                # collectors without a dedicated ir_profile
                                # column are grouped in the 'others' json blob
                                others[collector.name] = json.dumps(collector.entries)
                            else:
                                values[collector.name] = json.dumps(collector.entries)
                    if others:
                        values['others'] = json.dumps(others)
                    query = SQL(
                        "INSERT INTO ir_profile(%s) VALUES %s RETURNING id",
                        SQL(",").join(map(SQL.identifier, values)),
                        tuple(values.values()),
                    )
                    cr.execute(query)
                    self.profile_id = cr.fetchone()[0]
                    _logger.info('ir_profile %s (%s) created', self.profile_id, self.profile_session)
        except OperationalError:
            _logger.exception("Could not save profile in database")
        finally:
            self.exit_stack.close()
            if self.params:
                del self.init_thread.profiler_params
            if self.log:
                _logger.info(self.summary())

    def _get_cm_proxy(self):
        return Nested(self)

    def _add_file_lines(self, stack):
        """ Fill in, in place, the source line of every (filename, lineno,
        name, '') frame of *stack*, using a per-profiler file cache. """
        for index, frame in enumerate(stack):
            (filename, lineno, name, line) = frame
            if line != '':
                continue
            # retrieve file lines from the filecache
            if not lineno:
                continue
            try:
                filelines = self.filecache[filename]
            except KeyError:
                try:
                    with tools.file_open(filename, filter_ext=('.py',)) as f:
                        filelines = f.readlines()
                except (ValueError, FileNotFoundError):  # mainly for <decorator> "filename"
                    filelines = None
                self.filecache[filename] = filelines
            # fill in the line
            if filelines is not None:
                line = filelines[lineno - 1]
                stack[index] = (filename, lineno, name, line)

    def entry_count(self):
        """ Return the total number of entries collected in this profiler. """
        return sum(len(collector.entries) for collector in self.collectors)

    def format_path(self, path):
        """
        Utility function to format a path for this profiler.
        This is mainly useful to uniquify a path between executions.
        """
        return path.format(
            time=real_datetime_now().strftime("%Y%m%d-%H%M%S"),
            len=self.entry_count(),
            desc=re.sub("[^0-9a-zA-Z-]+", "_", self.description)
        )

    def json(self):
        """
        Utility function to generate a json version of this profiler.
        This is useful to write profiling entries into a file, such as::

            with Profiler(db=None) as profiler:
                do_stuff()

            filename = profiler.format_path('/home/foo/{desc}_{len}.json')
            with open(filename, 'w') as f:
                f.write(profiler.json())
        """
        return json.dumps({
            "name": self.description,
            "session": self.profile_session,
            "create_date": real_datetime_now().strftime("%Y%m%d-%H%M%S"),
            "init_stack_trace": _format_stack(self.init_stack_trace),
            "duration": self.duration,
            "collectors": {collector.name: collector.entries for collector in self.collectors},
        }, indent=4)

    def summary(self):
        """ Return a text summary of this profiler and its sub-profilers. """
        result = ''
        for profiler in [self, *self.sub_profilers]:
            for collector in profiler.collectors:
                # use the looped profiler's description (was `self.description`,
                # which mislabelled every sub-profiler section with the parent's)
                result += f'\n{profiler.description}\n{collector.summary()}'
        return result

736 

737 

class Nested:
    """
    Utility to nest another context manager inside a profiler.

    The profiler should only be called directly in the "with" without nesting it
    with ExitStack. If not, the retrieval of the 'init_frame' may be incorrect
    and lead to an error "Limit frame was not found" when profiling. Since the
    stack will ignore all stack frames inside this file, the nested frames will
    be ignored, too. This is also why Nested() does not use
    contextlib.contextmanager.
    """
    def __init__(self, profiler, context_manager=None):
        self._profiler__ = profiler
        # fall back to a no-op context manager when none is given
        self.context_manager = context_manager or nullcontext()

    def __enter__(self):
        # enter the profiler first so it wraps the nested context manager,
        # then yield whatever the inner manager yields
        self._profiler__.__enter__()
        return self.context_manager.__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            return self.context_manager.__exit__(exc_type, exc_value, traceback)
        finally:
            # close the profiler even if the inner __exit__ raised
            self._profiler__.__exit__(exc_type, exc_value, traceback)