Coverage for adhoc-cicd-odoo-odoo / odoo / orm / models.py: 62%
3291 statements
« prev ^ index » next — coverage.py v7.13.4, created at 2026-03-09 18:05 +0000
1# Part of Odoo. See LICENSE file for full copyright and licensing details.
4"""
5 Object Relational Mapping module:
6 * Hierarchical structure
7 * Constraints consistency and validation, indexes
8 * Object metadata depends on its status
9 * Optimised processing by complex query (multiple actions at once)
10 * Default field values
11 * Permissions optimisation
12 * Persistent object: DB postgresql
13 * Data conversion
14 * Multi-level caching system
15 * Two different inheritance mechanisms
16 * Rich set of field types:
17 - classical (varchar, integer, boolean, ...)
18 - relational (one2many, many2one, many2many)
19 - functional
21"""
22from __future__ import annotations
24import collections
25import contextlib
26import datetime
27import functools
28import inspect
29import itertools
30import io
31import json
32import logging
33import pytz
34import re
35import typing
36import uuid
37import warnings
38from collections import defaultdict, deque
39from collections.abc import Callable, Mapping
40from inspect import getmembers
41from operator import attrgetter, itemgetter
43import babel
44import babel.dates
45import psycopg2.errors
46import psycopg2.extensions
47from psycopg2.extras import Json
49from odoo.exceptions import AccessError, LockError, MissingError, ValidationError, UserError
50from odoo.tools import (
51 clean_context, date_utils,
52 DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, format_list,
53 frozendict, get_lang, OrderedSet,
54 ormcache, partition, Query, split_every, unique,
55 SQL, sql, groupby,
56)
57from odoo.tools.constants import PREFETCH_MAX
58from odoo.tools.lru import LRU
59from odoo.tools.misc import ReversedIterable, exception_to_unicode, unquote
60from odoo.tools.translate import _, LazyTranslate
62from . import decorators as api
63from .commands import Command
64from .domains import Domain
65from .fields import Field, determine
66from .fields_misc import Id
67from .fields_temporal import Date, Datetime
68from .fields_textual import Char
70from .identifiers import NewId
71from .utils import (
72 OriginIds, check_object_name, parse_field_expr,
73 COLLECTION_TYPES, SQL_OPERATORS,
74 READ_GROUP_ALL_TIME_GRANULARITY, READ_GROUP_TIME_GRANULARITY, READ_GROUP_NUMBER_GRANULARITY,
75 SUPERUSER_ID,
76)
78if typing.TYPE_CHECKING:
79 from collections.abc import Collection, Iterable, Iterator, Reversible, Sequence
80 from types import MappingProxyType
81 from .table_objects import TableObject
82 from .environments import Environment
83 from .registry import Registry, TriggerTree
84 from .types import Self, DomainType, IdType, ModelType, ValuesType
86 T = typing.TypeVar('T')
# module-level translation helper and loggers
_lt = LazyTranslate('base')
_logger = logging.getLogger('odoo.models')
_unlink = logging.getLogger('odoo.models.unlink')
# Validates a full ORDER BY specification: one or more comma-separated terms,
# each "field[.property][:func] [asc|desc] [nulls first|nulls last]".
regex_order = re.compile(r'''
    ^
    (\s*
        (?P<term>((?P<field>[a-z0-9_]+)(\.(?P<property>[a-z0-9_]+))?(:(?P<func>[a-z_]+))?))
        (\s+(?P<direction>desc|asc))?
        (\s+(?P<nulls>nulls\ first|nulls\ last))?
        \s*
        (,|$)
    )+
    (?<!,)
    $
''', re.VERBOSE | re.IGNORECASE)

# Parses a single ORDER BY term of a read_group order specification.
regex_order_part_read_group = re.compile(r"""
    \s*
    (?P<term>(?P<field>[a-z0-9_]+)(\.([\w\.]+))?(:(?P<func>[a-z_]+))?)
    (\s+(?P<direction>desc|asc))?
    (\s+(?P<nulls>nulls\ first|nulls\ last))?
    \s*
""", re.VERBOSE | re.IGNORECASE)

regex_field_agg = re.compile(r'(\w+)(?::(\w+)(?:\((\w+)\))?)?')  # For read_group
regex_read_group_spec = re.compile(r'(\w+)(\.([\w\.]+))?(?::(\w+))?$')  # For _read_group
# number of records per batch when recomputing stored fields at module init
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000

# batch sizes for bulk INSERT / UPDATE statements
INSERT_BATCH_SIZE = 100
UPDATE_BATCH_SIZE = 100

# literal DEFAULT keyword, letting PostgreSQL apply the column default
SQL_DEFAULT = psycopg2.extensions.AsIs("DEFAULT")

# hacky-ish way to prevent access to a field through the ORM (except for sudo mode)
NO_ACCESS = '.'
def parse_read_group_spec(spec: str) -> tuple:
    """ Parse ``spec`` and return its ``(field, property_name, aggregate)`` triplet.

    ``spec`` is a groupby/aggregate specification such as ``"quantity:sum"``,
    ``"date:month"`` or ``"props.tag:month"``.
    """
    match = regex_read_group_spec.match(spec)
    if match is None:
        raise ValueError(
            f'Invalid aggregate/groupby specification {spec!r}.\n'
            '- Valid aggregate specification looks like "<field_name>:<agg>" example: "quantity:sum".\n'
            '- Valid groupby specification looks like "<no_datish_field_name>" or "<datish_field_name>:<granularity>" example: "date:month" or "<properties_field_name>.<property>:<granularity>".'
        )

    # groups: (field, '.property', property, aggregate) — the dotted group is dropped
    field_name, _dotted, property_name, aggregate = match.groups()
    return field_name, property_name, aggregate
def raise_on_invalid_object_name(name):
    """ Raise ``ValueError`` when ``name`` is not a valid model name. """
    if check_object_name(name):
        return
    raise ValueError("The _name attribute %s is not valid." % name)
def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    # turn trailing '.id' (db id) and ':id' (external id) markers into their
    # own path component before splitting on '/'
    with_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    with_xml_id = re.sub(r'([^/]):id', r'\1/id', with_db_id)
    return with_xml_id.split('/')
def to_record_ids(arg) -> list[int]:
    """ Return the record ids of ``arg``, which may be a recordset, an integer or a list of integers. """
    if isinstance(arg, BaseModel):
        return arg.ids
    if isinstance(arg, int):
        # a falsy id (0) means "no record"
        return [arg] if arg else []
    # iterable of ids: keep only truthy ones
    return [record_id for record_id in arg if record_id]
def check_company_domain_parent_of(self, companies):
    """ A `_check_company_domain` function that lets a record be used if either:
    - record.company_id = False (which implies that it is shared between all companies), or
    - record.company_id is a parent of any of the given companies.
    """
    if isinstance(companies, str):
        # symbolic reference (e.g. a field path), resolved later by the domain machinery
        return ['|', ('company_id', '=', False), ('company_id', 'parent_of', companies)]

    company_ids = to_record_ids(companies)
    if not company_ids:
        return [('company_id', '=', False)]

    # collect every ancestor id (via parent_path) of the given companies
    ancestor_ids = [
        int(ancestor)
        for company in self.env['res.company'].sudo().browse(company_ids)
        for ancestor in company.parent_path.split('/')[:-1]
    ]
    return [('company_id', 'in', ancestor_ids + [False])]
def check_companies_domain_parent_of(self, companies):
    """ A `_check_company_domain` function that lets a record be used if
    any company in record.company_ids is a parent of any of the given companies.
    """
    if isinstance(companies, str):
        # symbolic reference, resolved later by the domain machinery
        return [('company_ids', 'parent_of', companies)]

    company_ids = to_record_ids(companies)
    if not company_ids:
        return []

    # collect every ancestor id (via parent_path) of the given companies
    ancestor_ids = [
        int(ancestor)
        for company in self.env['res.company'].sudo().browse(company_ids)
        for ancestor in company.parent_path.split('/')[:-1]
    ]
    return [('company_ids', 'in', ancestor_ids)]
class MetaModel(type):
    """ The metaclass of all model classes.
        Its main purpose is to register the models per module.
    """
    _module_to_models__: defaultdict[str, list[MetaModel]] = defaultdict(list)

    pool: Registry | None
    """Reference to the registry for registry classes, otherwise it is a definition class."""

    _field_definitions: list[Field]
    _table_object_definitions: list[TableObject]
    _name: str
    _register: bool  # need to define on each Model, default: True
    _log_access: bool  # when defined, add update log columns
    _module: str | None
    _abstract: bool
    _auto: bool
    _inherit: list[str] | None

    def __new__(meta, name, bases, attrs):
        # __slots__ prevents assignment of non-fields on recordsets
        attrs.setdefault('__slots__', ())
        # collects the fields defined on the class (via Field.__set_name__())
        attrs.setdefault('_field_definitions', [])
        # collects the table object definitions on the class (via TableObject.__set_name__())
        attrs.setdefault('_table_object_definitions', [])

        if attrs.get('_register', True):
            # determine '_module' from the defining python module
            if '_module' not in attrs:
                module = attrs['__module__']
                assert module.startswith('odoo.addons.'), \
                    f"Invalid import of {module}.{name}, it should start with 'odoo.addons'."
                attrs['_module'] = module.split('.')[2]

            # a string '_inherit' doubles as the default '_name'
            _inherit = attrs.get('_inherit')
            if _inherit and isinstance(_inherit, str):
                attrs.setdefault('_name', _inherit)
                attrs['_inherit'] = [_inherit]

            if not attrs.get('_name'):
                # derive a name: '.' before every uppercase letter preceded by a non-underscore
                attrs['_name'] = re.sub(r"(?<=[^_])([A-Z])", r".\1", name).lower()
                _logger.warning("Class %s has no _name, please make it explicit: _name = %r", name, attrs['_name'])

            assert attrs.get('_name')

        return super().__new__(meta, name, bases, attrs)

    def __init__(self, name, bases, attrs):
        super().__init__(name, bases, attrs)

        if '__init__' in attrs and len(inspect.signature(attrs['__init__']).parameters) != 4:
            _logger.warning("The method %s.__init__ doesn't match the new signature in module %s", name, attrs.get('__module__'))

        if not attrs.get('_register', True):
            return

        # Remember which models to instantiate for this module.
        if self._module:
            self._module_to_models__[self._module].append(self)

        if not self._abstract and self._name not in self._inherit:
            # this class defines a model: add magic fields
            def add(name, field):
                setattr(self, name, field)
                field.__set_name__(self, name)

            def add_default(name, field):
                # only add the field when the class does not define it itself
                if name not in attrs:
                    setattr(self, name, field)
                    field.__set_name__(self, name)

            # make sure `id` field is still a `fields.Id`
            if not isinstance(self.id, Id):
                raise TypeError(f"Field {self.id} is not an instance of fields.Id")

            if attrs.get('_log_access', self._auto):
                from .fields_relational import Many2one  # noqa: PLC0415
                add_default('create_uid', Many2one(
                    'res.users', string='Created by', readonly=True))
                add_default('create_date', Datetime(
                    string='Created on', readonly=True))
                add_default('write_uid', Many2one(
                    'res.users', string='Last Updated by', readonly=True))
                add_default('write_date', Datetime(
                    string='Last Updated on', readonly=True))
# special columns automatically created by the ORM
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id', *LOG_ACCESS_COLUMNS]
# valid SQL aggregation functions: each entry maps an aggregate name to a
# callable building the SQL expression from (table alias, field expression)
READ_GROUP_AGGREGATE = {
    'sum': lambda table, expr: SQL('SUM(%s)', expr),
    'avg': lambda table, expr: SQL('AVG(%s)', expr),
    'max': lambda table, expr: SQL('MAX(%s)', expr),
    'min': lambda table, expr: SQL('MIN(%s)', expr),
    'bool_and': lambda table, expr: SQL('BOOL_AND(%s)', expr),
    'bool_or': lambda table, expr: SQL('BOOL_OR(%s)', expr),
    'array_agg': lambda table, expr: SQL('ARRAY_AGG(%s ORDER BY %s)', expr, SQL.identifier(table, 'id')),
    'array_agg_distinct': lambda table, expr: SQL('ARRAY_AGG(DISTINCT %s ORDER BY %s)', expr, expr),
    # 'recordset' aggregates will be post-processed to become recordsets
    'recordset': lambda table, expr: SQL('ARRAY_AGG(%s ORDER BY %s)', expr, SQL.identifier(table, 'id')),
    'count': lambda table, expr: SQL('COUNT(%s)', expr),
    'count_distinct': lambda table, expr: SQL('COUNT(DISTINCT %s)', expr),
}
# Babel date formats used to label date/datetime groups per granularity.
#
# Careful with week/year formats:
# - yyyy (lower) must always be used, *except* for week+year formats
# - YYYY (upper) must always be used for week+year format
# e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
# and W1 2006 for others
#
# Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
# such as 2006-01-01 being formatted as "January 2005" in some locales.
# Cfr: http://babel.pocoo.org/en/latest/dates.html#date-fields
READ_GROUP_DISPLAY_FORMAT = {
    'hour': 'hh:00 dd MMM',
    'day': 'dd MMM yyyy',    # yyyy = normal year
    'week': "'W'w YYYY",     # w YYYY = ISO week-year
    'month': 'MMMM yyyy',
    'quarter': 'QQQ yyyy',
    'year': 'yyyy',
}
class BaseModel(metaclass=MetaModel):
    """Base class for Odoo models.

    Odoo models are created by inheriting one of the following:

    * :class:`Model` for regular database-persisted models

    * :class:`TransientModel` for temporary data, stored in the database but
      automatically vacuumed every so often

    * :class:`AbstractModel` for abstract super classes meant to be shared by
      multiple inheriting models

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    corresponding model.

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record.

    To create a class that should not be instantiated,
    the :attr:`~odoo.models.BaseModel._register` attribute may be set to False.
    """
    __slots__ = ['env', '_ids', '_prefetch_ids']

    pool: Registry  # all registry classes have a registry on the class
    # TODO replace most usages with self.env.registry; pool is reserved for class instance

    _fields__: dict[str, Field]
    _fields: MappingProxyType[str, Field]

    _auto: bool = False
    """Whether a database table should be created.
    If set to ``False``, override :meth:`~odoo.models.BaseModel.init`
    to create the database table.

    Automatically defaults to `True` for abstract models.

    .. tip:: To create a model without any table, inherit
            from :class:`~odoo.models.AbstractModel`.
    """
    _register: bool = False  #: registry visibility
    _abstract: bool = True
    """ Whether the model is *abstract*.

    .. seealso:: :class:`AbstractModel`
    """
    _transient: bool = False
    """ Whether the model is *transient*.

    .. seealso:: :class:`TransientModel`
    """

    _name: str = None  #: the model name (in dot-notation, module namespace)
    _description: str | None = None  #: the model's informal name
    _module: str | None = None  #: the model's module (in the Odoo sense)
    _custom: bool = False  #: should be True for custom models only

    _inherit: str | list[str] | tuple[str, ...] = ()
    """Python-inherited models:

    :type: str or list(str) or tuple(str)

    .. note::

        * If :attr:`._name` is set, name(s) of parent models to inherit from
        * If :attr:`._name` is unset, name of a single model to extend in-place
    """
    _inherits: frozendict[str, str] = frozendict()
    """dictionary {'parent_model': 'm2o_field'} mapping the _name of the parent business
    objects to the names of the corresponding foreign key fields to use::

        _inherits = {
            'a.model': 'a_field_id',
            'b.model': 'b_field_id'
        }

    implements composition-based inheritance: the new model exposes all
    the fields of the inherited models but stores none of them:
    the values themselves remain stored on the linked record.

    .. warning::

      if multiple fields with the same name are defined in the
      :attr:`~odoo.models.Model._inherits`-ed models, the inherited field will
      correspond to the last one (in the inherits list order).
    """
    _table: str = ''  #: SQL table name used by model if :attr:`_auto`
    _table_query: SQL | str | None = None  #: SQL expression of the table's content (optional)
    _table_objects: dict[str, TableObject] = frozendict()  #: SQL/Table objects
    _inherit_children: OrderedSet[str]

    # TODO default _rec_name to ''
    _rec_name: str | None = None  #: field to use for labeling records, default: ``name``
    _rec_names_search: list[str] | None = None  #: fields to consider in ``name_search``
    _order: str = 'id'  #: default order field for searching results
    _parent_name: str = 'parent_id'  #: the many2one field used as parent field
    _parent_store: bool = False
    """set to True to compute parent_path field.

    Alongside a :attr:`~.parent_path` field, sets up an indexed storage
    of the tree structure of records, to enable faster hierarchical queries
    on the records of the current model using the ``child_of`` and
    ``parent_of`` domain operators.
    """
    _active_name: str | None = None
    """field to use for active records, automatically set to either ``"active"``
    or ``"x_active"``.
    """
    _fold_name: str = 'fold'  #: field to determine folded groups in kanban views

    _translate: bool = True  # False disables translations export for this model (Old API) TODO deprecate/remove
    _check_company_auto: bool = False
    """On write and create, call ``_check_company`` to ensure companies
    consistency on the relational fields having ``check_company=True``
    as attribute.
    """

    _allow_sudo_commands: bool = True
    """Allow One2many and Many2many Commands targeting this model in an environment using `sudo()` or `with_user()`.
    By disabling this flag, security-sensitive models protect themselves
    against malicious manipulation of One2many or Many2many fields
    through an environment using `sudo` or a more privileged user.
    """

    _depends: frozendict[str, Iterable[str]] = frozendict()
    """dependencies of models backed up by SQL views
    ``{model_name: field_names}``, where ``field_names`` is an iterable.
    This is only used to determine the changes to flush to database before
    executing any search/read operations. It won't be used for cache
    invalidation or recomputing fields.
    """

    id = Id()
    display_name = Char(
        string='Display Name',
        compute='_compute_display_name',
        search='_search_display_name',
    )
479 def _valid_field_parameter(self, field, name):
480 """ Return whether the given parameter name is valid for the field. """
481 return name == 'related_sudo'
483 @api.model
484 def _post_model_setup__(self):
485 """ Method called after the model has been setup. """
486 pass
488 @property
489 def _table_sql(self) -> SQL:
490 """ Return an :class:`SQL` object that represents SQL table identifier
491 or table query.
492 """
493 table_query = self._table_query
494 if table_query and isinstance(table_query, SQL): 494 ↛ 495line 494 didn't jump to line 495 because the condition on line 494 was never true
495 table_sql = SQL("(%s)", table_query)
496 elif table_query: 496 ↛ 497line 496 didn't jump to line 497 because the condition on line 496 was never true
497 table_sql = SQL(f"({table_query})")
498 else:
499 table_sql = SQL.identifier(self._table)
500 if not self._depends: 500 ↛ 504line 500 didn't jump to line 504 because the condition on line 500 was always true
501 return table_sql
503 # add self._depends (and its transitive closure) as metadata to table_sql
504 fields_to_flush: list[Field] = []
505 models = [self]
506 while models:
507 current_model = models.pop()
508 for model_name, field_names in current_model._depends.items():
509 model = self.env[model_name]
510 models.append(model)
511 fields_to_flush.extend(model._fields[fname] for fname in field_names)
513 return SQL().join([
514 table_sql,
515 *(SQL(to_flush=field) for field in fields_to_flush),
516 ])
518 @property
519 def _constraint_methods(self):
520 """ Return a list of methods implementing Python constraints. """
521 def is_constraint(func):
522 return callable(func) and hasattr(func, '_constrains')
524 def wrap(func, names):
525 # wrap func into a proxy function with explicit '_constrains'
526 @api.constrains(*names)
527 def wrapper(self):
528 return func(self)
529 return wrapper
531 cls = self.env.registry[self._name]
532 methods = []
533 for attr, func in getmembers(cls, is_constraint):
534 if callable(func._constrains):
535 func = wrap(func, func._constrains(self.sudo()))
536 for name in func._constrains:
537 field = cls._fields.get(name)
538 if not field: 538 ↛ 539line 538 didn't jump to line 539 because the condition on line 538 was never true
539 _logger.warning("method %s.%s: @constrains parameter %r is not a field name", cls._name, attr, name)
540 elif not (field.store or field.inverse or field.inherited): 540 ↛ 541line 540 didn't jump to line 541 because the condition on line 540 was never true
541 _logger.warning("method %s.%s: @constrains parameter %r is not writeable", cls._name, attr, name)
542 methods.append(func)
544 # optimization: memoize result on cls, it will not be recomputed
545 cls._constraint_methods = methods
546 return methods
548 @property
549 def _ondelete_methods(self):
550 """ Return a list of methods implementing checks before unlinking. """
551 def is_ondelete(func):
552 return callable(func) and hasattr(func, '_ondelete')
554 cls = self.env.registry[self._name]
555 methods = [func for _, func in getmembers(cls, is_ondelete)]
556 # optimization: memoize results on cls, it will not be recomputed
557 cls._ondelete_methods = methods
558 return methods
560 @property
561 def _onchange_methods(self):
562 """ Return a dictionary mapping field names to onchange methods. """
563 def is_onchange(func):
564 return callable(func) and hasattr(func, '_onchange')
566 # collect onchange methods on the model's class
567 cls = self.env.registry[self._name]
568 methods = defaultdict(list)
569 for _attr, func in getmembers(cls, is_onchange):
570 missing = []
571 for name in func._onchange:
572 if name not in cls._fields:
573 missing.append(name)
574 methods[name].append(func)
575 if missing:
576 _logger.warning(
577 "@api.onchange%r parameters must be field names -> not valid: %s",
578 func._onchange, missing
579 )
581 # add onchange methods to implement "change_default" on fields
582 def onchange_default(field, self):
583 value = field.convert_to_write(self[field.name], self)
584 condition = "%s=%s" % (field.name, value)
585 defaults = self.env['ir.default']._get_model_defaults(self._name, condition)
586 self.update(defaults)
588 for name, field in cls._fields.items():
589 if field.change_default:
590 methods[name].append(functools.partial(onchange_default, field))
592 # optimization: memoize result on cls, it will not be recomputed
593 cls._onchange_methods = methods
594 return methods
596 def _is_an_ordinary_table(self):
597 return self.pool.is_an_ordinary_table(self)
599 def __ensure_xml_id(self, skip=False):
600 """ Create missing external ids for records in ``self``, and return an
601 iterator of pairs ``(record, xmlid)`` for the records in ``self``.
603 :rtype: Iterable[Model, str | None]
604 """
605 if skip:
606 return ((record, None) for record in self)
608 if not self:
609 return iter([])
611 if not self._is_an_ordinary_table():
612 raise Exception(
613 "You can not export the column ID of model %s, because the "
614 "table %s is not an ordinary table."
615 % (self._name, self._table))
617 modname = '__export__'
619 cr = self.env.cr
620 cr.execute(SQL("""
621 SELECT res_id, module, name
622 FROM ir_model_data
623 WHERE model = %s AND res_id IN %s
624 """, self._name, tuple(self.ids)))
625 xids = {
626 res_id: (module, name)
627 for res_id, module, name in cr.fetchall()
628 }
629 def to_xid(record_id):
630 (module, name) = xids[record_id]
631 return ('%s.%s' % (module, name)) if module else name
633 # create missing xml ids
634 missing = self.filtered(lambda r: r.id not in xids)
635 if not missing:
636 return (
637 (record, to_xid(record.id))
638 for record in self
639 )
641 xids.update(
642 (r.id, (modname, '%s_%s_%s' % (
643 r._table,
644 r.id,
645 uuid.uuid4().hex[:8],
646 )))
647 for r in missing
648 )
649 fields = ['module', 'model', 'name', 'res_id']
651 # disable eventual async callback / support for the extent of
652 # the COPY FROM, as these are apparently incompatible
653 callback = psycopg2.extensions.get_wait_callback()
654 psycopg2.extensions.set_wait_callback(None)
655 try:
656 cr.copy_from(io.StringIO(
657 u'\n'.join(
658 u"%s\t%s\t%s\t%d" % (
659 modname,
660 record._name,
661 xids[record.id][1],
662 record.id,
663 )
664 for record in missing
665 )),
666 table='ir_model_data',
667 columns=fields,
668 )
669 finally:
670 psycopg2.extensions.set_wait_callback(callback)
671 self.env['ir.model.data'].invalidate_model(fields)
673 return (
674 (record, to_xid(record.id))
675 for record in self
676 )
    def _export_rows(self, fields, *, _is_toplevel_call=True):
        """ Export fields of the records in ``self``.

        :param list fields: list of lists of fields to traverse
        :param bool _is_toplevel_call:
            used when recursing, avoid using when calling from outside
        :return: list of lists of corresponding values
        """
        import_compatible = self.env.context.get('import_compat', True)
        lines = []

        if not _is_toplevel_call:
            # {properties_field: {property_name: [property_type, {record_id: value}]}}
            cache_properties = self.env.cr.cache['export_properties_cache']
        else:
            # toplevel call initializes the shared cache (popped again at the end)
            cache_properties = self.env.cr.cache['export_properties_cache'] = defaultdict(dict)

        def fill_properties_cache(records, fnames_by_path, fname):
            """ Fill the cache for the ``fname`` properties field and return it """
            cache_properties_field = cache_properties[records._fields[fname]]

            # read properties to have all the logic of Properties.convert_to_read_multi
            for row in records.read([fname]):
                properties = row[fname]
                if not properties:
                    continue
                rec_id = row['id']

                for property in properties:
                    current_prop_name = property['name']
                    # only cache the properties actually requested in the export
                    if f"{fname}.{current_prop_name}" not in fnames_by_path:
                        continue
                    property_type = property['type']
                    if current_prop_name not in cache_properties_field:
                        cache_properties_field[current_prop_name] = [property_type, {}]

                    __, cache_by_id = cache_properties_field[current_prop_name]
                    if rec_id in cache_by_id:
                        continue

                    value = property.get('value')
                    # normalize the raw property value per property type
                    if property_type in ('many2one', 'many2many'):
                        if not isinstance(value, list):
                            value = [value] if value else []
                        value = self.env[property['comodel']].browse([val[0] for val in value])
                    elif property_type == 'tags' and value:
                        # render tags as a comma-separated list of tag labels
                        value = ",".join(
                            next(iter(tag[1] for tag in property['tags'] if tag[0] == v), '')
                            for v in value
                        )
                    elif property_type == 'selection':
                        value = dict(property['selection']).get(value, '')
                    cache_by_id[rec_id] = value

        def fetch_fields(records, field_paths):
            """ Fill the cache of ``records`` for all ``field_paths`` recursively included properties"""
            if not records:
                return

            # group the remaining paths by their first component
            fnames_by_path = dict(groupby(
                [path for path in field_paths if path and path[0] not in ('id', '.id')],
                lambda path: path[0],
            ))

            # Fetch needed fields (remove '.property_name' part)
            fnames = list(unique(fname.split('.')[0] for fname in fnames_by_path))
            records.fetch(fnames)
            # Fill the cache of the properties field
            for fname in fnames:
                field = records._fields[fname]
                if field.type == 'properties':
                    fill_properties_cache(records, fnames_by_path, fname)

            # Call it recursively for relational field (included property relational field)
            for fname, paths in fnames_by_path.items():
                if '.' in fname:  # Properties field
                    fname, prop_name = fname.split('.')
                    field = records._fields[fname]
                    assert field.type == 'properties' and prop_name

                    property_type, property_cache = cache_properties[field].get(prop_name, ('char', None))
                    if property_type not in ('many2one', 'many2many') or not property_cache:
                        continue
                    # union of the cached comodel recordsets for these records
                    model = next(iter(property_cache.values())).browse()
                    subrecords = model.union(*[property_cache[rec_id] for rec_id in records.ids if rec_id in property_cache])
                else:  # Normal field
                    field = records._fields[fname]
                    if not field.relational:
                        continue
                    subrecords = records[fname]

                # drop the first path component; default to display_name when empty
                paths = [path[1:] or ['display_name'] for path in paths]
                fetch_fields(subrecords, paths)

        fetch_fields(self, fields)

        for record in self:
            # main line of record, initially empty
            current = [''] * len(fields)
            lines.append(current)

            # list of primary fields followed by secondary field(s)
            primary_done = []

            # process column by column
            for i, path in enumerate(fields):
                if not path:
                    continue

                name = path[0]
                if name in primary_done:
                    continue

                if name == '.id':
                    current[i] = str(record.id)
                elif name == 'id':
                    # placeholder tuple, resolved to an xml id at toplevel (see below)
                    current[i] = (record._name, record.id)
                else:
                    prop_name = None
                    if '.' in name:  # Properties field
                        fname, prop_name = name.split('.')
                        field = record._fields[fname]
                        field_type, cache_value = cache_properties[field].get(prop_name, ('char', None))
                        value = cache_value.get(record.id, '') if cache_value else ''
                    else:  # Normal field
                        field = record._fields[name]
                        field_type = field.type
                        value = record[name]

                    # this part could be simpler, but it has to be done this way
                    # in order to reproduce the former behavior
                    if not isinstance(value, BaseModel):
                        current[i] = field.convert_to_export(value, record)
                    elif import_compatible and field_type == 'reference':
                        current[i] = f"{value._name},{value.id}"
                    else:
                        primary_done.append(name)
                        # recursively export the fields that follow name; use
                        # 'display_name' where no subfield is exported
                        fields2 = [(p[1:] or ['display_name'] if p and p[0] == name else [])
                                   for p in fields]

                        # in import_compat mode, m2m should always be exported as
                        # a comma-separated list of xids or names in a single cell
                        if import_compatible and field_type == 'many2many':
                            index = None
                            # find out which subfield the user wants & its
                            # location as we might not get it as the first
                            # column we encounter
                            for name in ['id', 'name', 'display_name']:
                                with contextlib.suppress(ValueError):
                                    index = fields2.index([name])
                                    break
                            if index is None:
                                # not found anything, assume we just want the
                                # display_name in the first column
                                name = None
                                index = i

                            if name == 'id':
                                xml_ids = [xid for _, xid in value.__ensure_xml_id()]
                                current[index] = ','.join(xml_ids)
                            else:
                                current[index] = ','.join(value.mapped('display_name')) if value else ''
                            continue

                        lines2 = value._export_rows(fields2, _is_toplevel_call=False)
                        if lines2:
                            # merge first line with record's main line
                            for j, val in enumerate(lines2[0]):
                                if val or isinstance(val, (int, float)):
                                    current[j] = val
                            # append the other lines at the end
                            lines += lines2[1:]
                        else:
                            current[i] = ''

        # if any xid should be exported, only do so at toplevel
        if _is_toplevel_call and any(f[-1] == 'id' for f in fields):
            bymodels = collections.defaultdict(set)
            xidmap = collections.defaultdict(list)
            # collect all the tuples in "lines" (along with their coordinates)
            for i, line in enumerate(lines):
                for j, cell in enumerate(line):
                    if isinstance(cell, tuple):
                        bymodels[cell[0]].add(cell[1])
                        xidmap[cell].append((i, j))
            # for each model, xid-export everything and inject in matrix
            for model, ids in bymodels.items():
                for record, xid in self.env[model].browse(ids).__ensure_xml_id():
                    for i, j in xidmap.pop((record._name, record.id)):
                        lines[i][j] = xid
            assert not xidmap, "failed to export xids for %s" % ', '.join('{}:{}' % it for it in xidmap.items())

        if _is_toplevel_call:
            # drop the shared properties cache installed at the start
            self.env.cr.cache.pop('export_properties_cache', None)

        return lines
880 def export_data(self, fields_to_export):
881 """ Export fields for selected objects
883 This method is used when exporting data via client menu
885 :param list fields_to_export: list of fields
886 :returns: dictionary with a *datas* matrix
887 :rtype: dict
888 """
889 if not (self.env.is_admin() or self.env.user.has_group('base.group_allow_export')):
890 raise UserError(_("You don't have the rights to export data. Please contact an Administrator."))
891 fields_to_export = [fix_import_export_id_paths(f) for f in fields_to_export]
892 return {'datas': self._export_rows(fields_to_export)}
    @api.model
    def load(self, fields, data):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :returns: {ids: list(int)|False, messages: [Message][, lastrow: int]}
        """
        from .fields_relational import One2many  # noqa: PLC0415

        # determine values of mode, current_module and noupdate
        mode = self.env.context.get('mode', 'init')
        current_module = self.env.context.get('module', '__import__')
        noupdate = self.env.context.get('noupdate', False)
        # add current module in context for the conversion of xml ids
        self = self.with_context(_import_current_module=current_module)

        cr = self.env.cr
        # outer savepoint: rolled back per record on failure, and entirely
        # when the whole import ends with errors
        savepoint = cr.savepoint()

        fields = [fix_import_export_id_paths(f) for f in fields]

        ids = []
        messages = []

        # list of (xid, vals, info) for records to be created in batch
        batch = []
        batch_xml_ids = set()
        # models in which we may have created / modified data, therefore might
        # require flushing in order to name_search: the root model and any
        # o2m
        creatable_models = {self._name}
        for field_path in fields:
            if field_path[0] in (None, 'id', '.id'):
                continue
            model_fields = self._fields
            for field_name in field_path:
                if field_name in (None, 'id', '.id'):
                    break

                # walk down one2many sub-paths to register their comodels too
                if isinstance(model_fields.get(field_name), One2many):
                    comodel = model_fields[field_name].comodel_name
                    creatable_models.add(comodel)
                    model_fields = self.env[comodel]._fields

        def flush(*, xml_id=None, model=None):
            # Create/update the records accumulated in ``batch``; failures
            # are reported through ``messages`` rather than raised.
            if not batch:
                return

            assert not (xml_id and model), \
                "flush can specify *either* an external id or a model, not both"

            if xml_id and xml_id not in batch_xml_ids:
                # NOTE(review): this tests the xid against the registry's
                # model names (env.__contains__); looks suspicious for an
                # external id — confirm the intent
                if xml_id not in self.env:
                    return
            if model and model not in creatable_models:
                return

            data_list = [
                dict(xml_id=xid, values=vals, info=info, noupdate=noupdate)
                for xid, vals, info in batch
            ]
            batch.clear()
            batch_xml_ids.clear()

            # try to create in batch
            global_error_message = None
            try:
                with cr.savepoint():
                    recs = self._load_records(data_list, mode == 'update')
                    ids.extend(recs.ids)
                return
            except psycopg2.InternalError as e:
                # broken transaction, exit and hope the source error was already logged
                if not any(message['type'] == 'error' for message in messages):
                    info = data_list[0]['info']
                    messages.append(dict(info, type='error', message=_(u"Unknown database error: '%s'", e)))
                return
            except UserError as e:
                # keep the batch-level error around; shown only if the
                # record-by-record retry below also fails
                global_error_message = dict(data_list[0]['info'], type='error', message=str(e))
            except Exception:
                pass

            errors = 0
            # try again, this time record by record
            for i, rec_data in enumerate(data_list, 1):
                try:
                    rec = self._load_records([rec_data], mode == 'update')
                    cr.flush()  # make sure flush exceptions are raised here
                    ids.append(rec.id)
                except psycopg2.Warning as e:
                    savepoint.rollback()
                    info = rec_data['info']
                    messages.append(dict(info, type='warning', message=str(e)))
                except psycopg2.Error as e:
                    savepoint.rollback()
                    info = rec_data['info']
                    pg_error_info = {'message': self._sql_error_to_message(e)}
                    if e.diag.table_name == self._table:
                        # pinpoint the offending column when PostgreSQL tells us
                        e_fields = get_columns_from_sql_diagnostics(self.env.cr, e.diag, check_registry=True)
                        if len(e_fields) == 1:
                            pg_error_info['field'] = e_fields[0]
                    messages.append(dict(info, type='error', **pg_error_info))
                    # Failed to write, log to messages, rollback savepoint (to
                    # avoid broken transaction) and keep going
                    errors += 1
                except UserError as e:
                    savepoint.rollback()
                    info = rec_data['info']
                    messages.append(dict(info, type='error', message=str(e)))
                    errors += 1
                except Exception as e:  # noqa: BLE001
                    savepoint.rollback()
                    _logger.debug("Error while loading record", exc_info=True)
                    info = rec_data['info']
                    message = _('Unknown error during import: %(error_type)s: %(error_message)s', error_type=e.__class__, error_message=e)
                    moreinfo = _('Resolve other errors first')
                    messages.append(dict(info, type='error', message=message, moreinfo=moreinfo))
                    # Failed for some reason, perhaps due to invalid data supplied,
                    # rollback savepoint and keep going
                    errors += 1
                # bail out when the error rate is clearly too high
                if errors >= 10 and (errors >= i / 10):
                    messages.append({
                        'type': 'warning',
                        'message': _("Found more than 10 errors and more than one error per 10 records, interrupted to avoid showing too many errors.")
                    })
                    break
            if errors > 0 and global_error_message and global_error_message not in messages:
                # If we cannot create the records 1 by 1, we display the error raised when we created the records simultaneously
                messages.insert(0, global_error_message)

        # make 'flush' available to the methods below, in the case where XMLID
        # resolution fails, for instance
        flush_recordset = self.with_context(import_flush=flush, import_cache=LRU(1024))

        # TODO: break load's API instead of smuggling via context?
        limit = self.env.context.get('_import_limit')
        if limit is None:
            limit = float('inf')

        extracted = flush_recordset._extract_records(fields, data, log=messages.append, limit=limit)

        converted = flush_recordset._convert_records(extracted, log=messages.append, savepoint=savepoint)

        # ``info`` keeps the row-range metadata of the last converted record;
        # seed it so that an empty import yields nextrow == 0
        info = {'rows': {'to': -1}}
        for id, xid, record, info in converted:
            if self.env.context.get('import_file') and self.env.context.get('import_skip_records'):
                # skip rows missing any of the listed required fields
                if any([record.get(field) is None for field in self.env.context['import_skip_records']]):
                    continue
            if xid:
                # qualify bare xids with the current module
                xid = xid if '.' in xid else "%s.%s" % (current_module, xid)
                batch_xml_ids.add(xid)
            elif id:
                record['id'] = id
            batch.append((xid, record, info))

        flush()
        if any(message['type'] == 'error' for message in messages):
            savepoint.rollback()
            ids = False
            # cancel all changes done to the registry/ormcache
            self.pool.reset_changes()
        savepoint.close(rollback=False)

        nextrow = info['rows']['to'] + 1
        if nextrow < limit:
            nextrow = 0
        return {
            'ids': ids,
            'messages': messages,
            'nextrow': nextrow,
        }
    def _extract_records(self, field_paths, data, log=lambda a: None, limit=float('inf')):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records

        The following sub-fields may be set on the record (by key):

        * None is the display_name for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record

        Each yielded item is a pair ``(record_dict, {'rows': {'from': i, 'to': j}})``
        giving the span of source rows the record was built from.
        """
        fields = self._fields

        # cell getters for the one2many columns vs. all the other columns
        get_o2m_values = itemgetter_tuple([
            index
            for index, fnames in enumerate(field_paths)
            if fnames[0] in fields and fields[fnames[0]].type == 'one2many'
        ])
        get_nono2m_values = itemgetter_tuple([
            index
            for index, fnames in enumerate(field_paths)
            if fnames[0] not in fields or fields[fnames[0]].type != 'one2many'
        ])

        # Checks if the provided row has any non-empty one2many fields
        def only_o2m_values(row):
            return any(get_o2m_values(row)) and not any(get_nono2m_values(row))

        # map "properties_field.property_name" column -> property definition,
        # and properties field -> list of such columns
        property_definitions = {}
        property_columns = defaultdict(list)
        for fname, *__ in field_paths:
            if not fname:
                continue
            if '.' not in fname:
                if fname not in fields:
                    raise ValueError(f'Invalid field name {fname!r}')
                continue

            f_prop_name, property_name = fname.split('.')
            if f_prop_name not in fields or fields[f_prop_name].type != 'properties':
                # Can be .id
                continue

            definition = self.get_property_definition(fname)
            if not definition:
                # Can happen if someone remove the property, UserError ?
                raise ValueError(f"Property {property_name!r} doesn't have any definition on {fname!r} field")

            property_definitions[fname] = definition
            property_columns[f_prop_name].append(fname)

        # m2o fields can't be on multiple lines so don't take it in account
        # for only_o2m_values rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields).
        def is_relational(fname):
            return (
                fname in fields and
                fields[fname].relational
            ) or (
                fname in property_definitions and
                property_definitions[fname].get('type') in ('many2one', 'many2many')
            )

        index = 0
        while index < len(data) and index < limit:
            row = data[index]

            # copy non-relational fields to record dict
            record = {
                fnames[0]: value
                for fnames, value in zip(field_paths, row)
                if not is_relational(fnames[0])
            }

            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values,
                (data[j] for j in range(index + 1, len(data))),
            )
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield, *__ in field_paths:
                if not is_relational(relfield):
                    continue

                if relfield not in property_definitions:
                    comodel = self.env[fields[relfield].comodel_name]
                else:
                    comodel = self.env[property_definitions[relfield]['comodel']]

                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for display_name field
                indices, subfields = zip(*((index, fnames[1:] or [None])
                                           for index, fnames in enumerate(field_paths)
                                           if fnames[0] == relfield))

                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = [it for it in map(itemgetter_tuple(indices), record_span) if any(it)]
                # recurse into the comodel to build the sub-records
                record[relfield] = [
                    subrecord
                    for subrecord, _subinfo in comodel._extract_records(subfields, relfield_data, log=log)
                ]

            # fold the individual property columns back into their
            # properties field, as a list of definition+value dicts
            for properties_fname, property_indexes_names in property_columns.items():
                properties = []
                for property_name in property_indexes_names:
                    value = record.pop(property_name)
                    properties.append(dict(**property_definitions[property_name], value=value))
                record[properties_fname] = properties

            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1,
            }}
            index += len(record_span)
    @api.model
    def _convert_records(self, records, *, log=lambda a: None, savepoint):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        ``self.create`` or ``(ir.model.data)._update``)

        :returns: an iterator of quadruplets ``(dbid, xid, converted_values, info)``
            where ``dbid``/``xid`` are ``False`` when not provided
        :rtype: Iterator[(int|str|False, str|False, dict, dict)]
        """
        # human-readable labels, possibly translated, used in error messages
        field_names = {name: field.string for name, field in self._fields.items()}
        if self.env.lang:
            field_names.update(self.env['ir.model.fields'].get_field_string(self._name))

        convert = self.env['ir.fields.converter'].for_model(self, savepoint=savepoint)

        def _log(base, record, field, exception):
            # Forward a conversion warning/error to ``log`` as a structured dict.
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            field_name = field_names[field]
            exc_vals = dict(base, record=record, field=field_name)
            record = dict(base, type=type, record=record, field=field,
                          message=str(exception.args[0]) % exc_vals)
            if len(exception.args) > 1:
                info = {}
                if exception.args[1] and isinstance(exception.args[1], dict):
                    info = exception.args[1]
                # ensure field_name is added to the exception. Used in import to
                # concatenate multiple errors in the same block
                info['field_name'] = field_name
                record.update(info)
            log(record)

        for stream_index, (record, extras) in enumerate(records):
            # xid
            xid = record.get('id', False)
            # dbid
            dbid = False
            if record.get('.id'):
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search([('id', '=', dbid)]):
                    log(dict(extras,
                             type='error',
                             record=stream_index,
                             field='.id',
                             message=_(u"Unknown database identifier '%s'", dbid)))
                    dbid = False

            converted = convert(record, functools.partial(_log, extras, stream_index))

            yield dbid, xid, converted, dict(extras, record=stream_index)
1253 def _validate_fields(self, field_names: Iterable[str], excluded_names: Iterable[str] = ()) -> None:
1254 """ Invoke the constraint methods for which at least one field name is
1255 in ``field_names`` and none is in ``excluded_names``.
1256 """
1257 methods = self._constraint_methods
1258 if not methods:
1259 return
1260 # run constrains just as sudoed computed-stored fields
1261 # see Field.compute_value()
1262 records = self.sudo()
1263 field_names = set(field_names)
1264 excluded_names = set(excluded_names)
1265 for check in methods:
1266 if (not field_names.isdisjoint(check._constrains)
1267 and excluded_names.isdisjoint(check._constrains)):
1268 check(records)
1270 @api.model
1271 def default_get(self, fields: Sequence[str]) -> ValuesType:
1272 """Return default values for the fields in ``fields_list``. Default
1273 values are determined by the context, user defaults, user fallbacks
1274 and the model itself.
1276 :param fields: names of field whose default is requested
1277 :return: a dictionary mapping field names to their corresponding default values,
1278 if they have a default value.
1280 .. note::
1282 Unrequested defaults won't be considered, there is no need to return a
1283 value for fields whose names are not in `fields_list`.
1284 """
1285 defaults = {}
1286 parent_fields = defaultdict(list)
1287 ir_defaults = self.env['ir.default']._get_model_defaults(self._name)
1289 for name in fields:
1290 # 1. look up context
1291 key = 'default_' + name
1292 if key in self.env.context:
1293 defaults[name] = self.env.context[key]
1294 continue
1296 field = self._fields.get(name)
1297 if not field: 1297 ↛ 1298line 1297 didn't jump to line 1298 because the condition on line 1297 was never true
1298 continue
1300 # 2. look up default for non-company_dependent fields
1301 if not field.company_dependent and name in ir_defaults:
1302 defaults[name] = ir_defaults[name]
1303 continue
1305 # 3. look up field.default
1306 if field.default:
1307 defaults[name] = field.default(self)
1308 continue
1310 # 4. look up fallback for company_dependent fields
1311 if field.company_dependent and name in ir_defaults:
1312 defaults[name] = ir_defaults[name]
1313 continue
1315 # 5. delegate to parent model
1316 if field.inherited:
1317 field = field.related_field
1318 parent_fields[field.model_name].append(field.name)
1320 # convert default values to the right format
1321 #
1322 # we explicitly avoid using _convert_to_write() for x2many fields,
1323 # because the latter leaves values like [(Command.LINK, 2),
1324 # (Command.LINK, 3)], which are not supported by the web client as
1325 # default values; stepping through the cache allows to normalize
1326 # such a list to [(Command.SET, 0, [2, 3])], which is properly
1327 # supported by the web client
1328 for fname, value in defaults.items():
1329 if fname in self._fields: 1329 ↛ 1328line 1329 didn't jump to line 1328 because the condition on line 1329 was always true
1330 field = self._fields[fname]
1331 value = field.convert_to_cache(value, self, validate=False)
1332 defaults[fname] = field.convert_to_write(value, self)
1334 # add default values for inherited fields
1335 for model, names in parent_fields.items():
1336 defaults.update(self.env[model].default_get(names))
1338 return defaults
1340 @api.model
1341 def _rec_name_fallback(self) -> str:
1342 # if self._rec_name is set, it belongs to self._fields
1343 return self._rec_name or 'id'
1345 @api.model
1346 @api.readonly
1347 def search_count(self, domain: DomainType, limit: int | None = None) -> int:
1348 """Return the number of records in the current model matching
1349 :ref:`the provided domain <reference/orm/domains>`.
1351 :param domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
1352 list to match all records.
1353 :param limit: maximum number of record to count (upperbound) (default: all)
1355 This is a high-level method, which should not be overridden. Its actual
1356 implementation is done by method :meth:`_search`.
1357 """
1358 query = self._search(domain, limit=limit)
1359 return len(query)
1361 @api.model
1362 @api.readonly
1363 def search(self, domain: DomainType, offset: int = 0, limit: int | None = None, order: str | None = None) -> Self:
1364 """Search for the records that satisfy the given ``domain``
1365 :ref:`search domain <reference/orm/domains>`.
1367 :param domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
1368 list to match all records.
1369 :param offset: number of results to ignore (default: none)
1370 :param limit: maximum number of records to return (default: all)
1371 :param order: sort string
1372 :returns: at most ``limit`` records matching the search criteria
1373 :raise AccessError: if user is not allowed to access requested information
1375 This is a high-level method, which should not be overridden. Its actual
1376 implementation is done by method :meth:`_search`.
1377 """
1378 return self.search_fetch(domain, [], offset=offset, limit=limit, order=order)
1380 @api.model
1381 @api.private
1382 @api.readonly
1383 def search_fetch(
1384 self,
1385 domain: DomainType,
1386 field_names: Sequence[str] | None = None,
1387 offset: int = 0,
1388 limit: int | None = None,
1389 order: str | None = None,
1390 ) -> Self:
1391 """Search for the records that satisfy the given ``domain``
1392 :ref:`search domain <reference/orm/domains>`, and fetch the given fields
1393 to the cache. This method is like a combination of methods :meth:`search`
1394 and :meth:`fetch`, but it performs both tasks with a minimal number of
1395 SQL queries.
1397 :param domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
1398 list to match all records.
1399 :param field_names: a collection of field names to fetch, or ``None`` for
1400 all accessible fields marked with ``prefetch=True``
1401 :param offset: number of results to ignore (default: none)
1402 :param limit: maximum number of records to return (default: all)
1403 :param order: sort string
1404 :returns: at most ``limit`` records matching the search criteria
1405 :raise AccessError: if user is not allowed to access requested information
1406 """
1407 # first determine a query that satisfies the domain and access rules
1408 query = self._search(domain, offset=offset, limit=limit, order=order or self._order)
1410 if query.is_empty():
1411 # optimization: don't execute the query at all
1412 if not self.env.su: # check access to fields 1412 ↛ 1413line 1412 didn't jump to line 1413 because the condition on line 1412 was never true
1413 self._determine_fields_to_fetch(field_names)
1414 return self.browse()
1416 fields_to_fetch = self._determine_fields_to_fetch(field_names)
1418 return self._fetch_query(query, fields_to_fetch)
1420 #
1421 # display_name, name_create, name_search
1422 #
1424 @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
1425 def _compute_display_name(self):
1426 """Compute the value of the `display_name` field.
1428 The `display_name` field is a textual representation of the record.
1429 This method can be overridden to change the representation. If needed,
1430 it can be made field-dependent using :attr:`~odoo.api.depends` and
1431 context-dependent using :attr:`~odoo.api.depends_context`.
1432 """
1433 if self._rec_name: 1433 ↛ 1438line 1433 didn't jump to line 1438 because the condition on line 1433 was always true
1434 convert = self._fields[self._rec_name].convert_to_display_name
1435 for record in self:
1436 record.display_name = convert(record[self._rec_name], record)
1437 else:
1438 for record in self:
1439 record.display_name = f"{record._name},{record.id}"
    @api.model
    def _search_display_name(self, operator, value):
        """
        Returns a domain that matches records whose display name matches the
        given ``name`` pattern when compared with the given ``operator``.
        This method is used to implement the search on the ``display_name``
        field, and can be overridden to change the search criteria.
        The default implementation searches the fields defined in `_rec_names_search`
        or `_rec_name`.
        """
        search_fnames = self._rec_names_search or ([self._rec_name] if self._rec_name else [])
        if not search_fnames:
            _logger.warning("Cannot search on display_name, no _rec_name or _rec_names_search defined on %s", self._name)
            # do not restrain anything
            return Domain.TRUE
        if operator.endswith('like') and not value and '=' not in operator:
            # optimize out the default criterion of ``like ''`` that matches everything
            # return all when operator is positive
            return Domain.FALSE if operator in Domain.NEGATIVE_OPERATORS else Domain.TRUE
        # a negative operator must hold on *all* searched fields, a positive
        # one on at least one of them
        aggregator = Domain.AND if operator in Domain.NEGATIVE_OPERATORS else Domain.OR
        domains = []
        for field_name in search_fnames:
            # field_name may be a sequence of field names (partner_id.name)
            # retrieve the last field in the sequence
            model = self
            for fname in field_name.split('.'):
                field = model._fields[fname]
                model = self.env.get(field.comodel_name)
            # depending on the operator, we may need to cast the value to the type of the field
            # ignore if we cannot convert
            if field.relational:
                # relational fields will search on the display_name
                domains.append([(field_name + '.display_name', operator, value)])
            elif operator.endswith('like'):
                domains.append([(field_name, operator, value)])
            elif isinstance(value, COLLECTION_TYPES):
                # convert each element, silently dropping inconvertible ones
                typed_value = []
                for v in value:
                    with contextlib.suppress(ValueError, TypeError):
                        typed_value.append(field.convert_to_write(v, self))
                domains.append([(field_name, operator, typed_value)])
            else:
                with contextlib.suppress(ValueError):
                    typed_value = field.convert_to_write(value, self)
                    domains.append([(field_name, operator, typed_value)])
                continue
            # NOTE(review): the three branches above fall through here and may
            # append a second, converted-value criterion for the same field
            # (conversion failures silently ignored) — confirm this double
            # append is intended
            with contextlib.suppress(ValueError, TypeError):
                # ignore that case if the value doesn't match the field type
                domains.append([(field_name, operator, field.convert_to_write(value, self))])
        return aggregator(domains)
1492 @api.model
1493 def name_create(self, name: str) -> tuple[int, str] | typing.Literal[False]:
1494 """Create a new record by calling :meth:`~.create` with only one value
1495 provided: the display name of the new record.
1497 The new record will be initialized with any default values
1498 applicable to this model, or provided through the context. The usual
1499 behavior of :meth:`~.create` applies.
1501 :param name: display name of the record to create
1502 :return: the (id, display_name) pair value of the created record
1503 """
1504 if self._rec_name:
1505 record = self.create({self._rec_name: name})
1506 return record.id, record.display_name
1507 else:
1508 # TODO raise an error, remove False return value
1509 _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
1510 return False
1512 @api.model
1513 @api.readonly
1514 def name_search(
1515 self,
1516 name: str = '',
1517 domain: DomainType | None = None,
1518 operator: str = 'ilike',
1519 limit: int = 100,
1520 ) -> list[tuple[int, str]]:
1521 """Search for records that have a display name matching the given
1522 ``name`` pattern when compared with the given ``operator``, while also
1523 matching the optional search domain (``domain``).
1525 This is used for example to provide suggestions based on a partial
1526 value for a relational field. Should usually behave as the reverse of
1527 ``display_name``, but that is not guaranteed.
1529 This method is equivalent to calling :meth:`~.search` with a search
1530 domain based on ``display_name`` and mapping id and display_name on
1531 the resulting search.
1533 :param name: the name pattern to match
1534 :param domain: search domain (see :meth:`~.search` for syntax),
1535 specifying further restrictions
1536 :param operator: domain operator for matching ``name``,
1537 such as ``'like'`` or ``'='``.
1538 :param limit: max number of records to return
1539 :return: list of pairs ``(id, display_name)`` for all matching records.
1540 """
1541 domain = Domain('display_name', operator, name) & Domain(domain or Domain.TRUE)
1542 records = self.search_fetch(domain, ['display_name'], limit=limit)
1543 return [(record.id, record.display_name) for record in records.sudo()]
    @api.model
    def _add_missing_default_values(self, values: ValuesType) -> ValuesType:
        """ Complete ``values`` with defaults for the fields it does not set,
        and return the completed dict.  The provided ``values`` always take
        precedence over the computed defaults.
        """
        # avoid overriding inherited values when parent is set
        avoid_models = set()

        def collect_models_to_avoid(model):
            # Walk up the _inherits chain: when a parent link field is set in
            # ``values``, that parent model keeps its own values.
            for parent_mname, parent_fname in model._inherits.items():
                if parent_fname in values:
                    avoid_models.add(parent_mname)
                else:
                    # manage the case where an ancestor parent field is set
                    collect_models_to_avoid(self.env[parent_mname])

        collect_models_to_avoid(self)

        def avoid(field):
            # check whether the field is inherited from one of avoid_models
            if avoid_models:
                while field.inherited:
                    field = field.related_field
                    if field.model_name in avoid_models:
                        return True
            return False

        # compute missing fields
        missing_defaults = [
            name
            for name, field in self._fields.items()
            if name not in values
            if not avoid(field)
        ]

        if missing_defaults:
            # override defaults with the provided values, never allow the other way around
            defaults = self.default_get(missing_defaults)
            for name, value in defaults.items():
                if self._fields[name].type == 'many2many' and value and isinstance(value[0], int):
                    # convert a list of ids into a list of commands
                    defaults[name] = [Command.set(value)]
                elif self._fields[name].type == 'one2many' and value and isinstance(value[0], dict):
                    # convert a list of dicts into a list of commands
                    defaults[name] = [Command.create(x) for x in value]
            defaults.update(values)
        else:
            defaults = values

        # delegate the default properties to the properties field
        for field in self._fields.values():
            if field.type == 'properties':
                defaults[field.name] = field._add_default_values(self.env, defaults)

        return defaults
    @api.model
    def _read_grouping_sets(
        self,
        domain: DomainType,
        grouping_sets: Sequence[Sequence[str]],
        aggregates: Sequence[str] = (),
        order: str | None = None,
    ) -> list[list[tuple]]:
        """ Performs multiple aggregations with different groupings in a single query if possible.

        This method uses SQL `GROUPING SETS` as a more advanced and efficient
        alternative to calling :meth:`~._read_group` multiple times with different
        `groupby` parameters. It allows you to get different levels of aggregated
        data in one database round-trip.

        Note that for many2many multiple SQL might be needed because of the deduplicated rows.

        :param domain: :ref:`A search domain <reference/orm/domains>` to filter records before grouping
        :param grouping_sets: A list of `groupby` specifications. Each inner list
                is a set of fields to group by and is equivalent to the
                `groupby` parameter of the :meth:`~._read_group` method.
                For example: `[['partner_id'], ['partner_id', 'state']]`.
        :param aggregates: list of aggregates specification.
                Each element is `'field:agg'` (aggregate field with aggregation function `'agg'`).
                The possible aggregation functions are the ones provided by
                `PostgreSQL <https://www.postgresql.org/docs/current/static/functions-aggregate.html>`_,
                `'count_distinct'` with the expected meaning and `'recordset'` to act like `'array_agg'`
                converted into a recordset.
        :param order: optional ``order by`` specification, for
                overriding the natural sort ordering of the groups,
                see also :meth:`~.search`.
        :return: A list of lists of tuples. The outer list's structure mirrors the
                input `grouping_sets`. Each inner list contains the results for one
                grouping specification. Each tuple within an inner list contains the
                values for the grouped fields, followed by the aggregate values,
                in the order they were specified.

                For example, given:

                - `grouping_sets=[['foo'], ['foo', 'bar']]`
                - `aggregates=['baz:sum']`

                The returned structure would be:
                ::

                    [
                        # Results for ['foo']
                        [(foo1_val, baz_sum_1), (foo2_val, baz_sum_2), ...],
                        # Results for ['foo', 'bar']
                        [(foo1_val, bar1_val, baz_sum_3), (foo2_val, bar2_val, baz_sum_4), ...],
                    ]

        :raise AccessError: if user is not allowed to access requested information
        """
        if not grouping_sets:
            raise ValueError("The 'grouping_sets' parameter cannot be empty.")

        query = self._search(domain)
        # One (initially empty) result bucket per requested grouping set.
        result = [[] for __ in grouping_sets]
        if query.is_empty():
            return result

        # grouping_sets: [(a, b), (a), ()]
        # all_groupby_specs: (a, b)
        all_groupby_specs = tuple(unique(spec for groupby in grouping_sets for spec in groupby))

        # --- Many2many Special Handling ---
        # Joining a many2many relation duplicates base rows, which corrupts
        # row-sensitive aggregates (sum, avg, ...) of grouping sets that do
        # NOT group on that many2many field.
        many2many_groupby_specs = []
        if len(grouping_sets) > 1:  # many2many logic only applies if we have multiple groupings

            def might_duplicate_rows(model, spec) -> bool:
                # True when grouping by `spec` can multiply base rows
                # (many2many field, or 'tags'/'many2many' property).
                fname, property_name, __ = parse_read_group_spec(spec)
                field = model._fields[fname]
                if field.type == 'properties':
                    definition = self.get_property_definition(f"{fname}.{property_name}")
                    property_type = definition.get('type')
                    return property_type in ('tags', 'many2many')

                if property_name:
                    assert field.type == 'many2one'
                    return might_duplicate_rows(self.env[field.comodel_name], property_name)

                return field.type == 'many2many'

            for spec in all_groupby_specs:
                if might_duplicate_rows(self, spec):
                    many2many_groupby_specs.append(spec)

        if (
            many2many_groupby_specs and
            # If aggregates are sensitive to row duplication (like sum, avg), we must isolate M2M groupings.
            any(
                not aggregate.endswith(
                    (':max', ':min', ':bool_and', ':bool_or', ':array_agg_distinct', ':recordset', ':count_distinct'),
                )
                for aggregate in aggregates if aggregate != '__count'
            )
        ):
            # The following logic is a recursive decomposition strategy. It's complex
            # but necessary to prevent M2M joins from corrupting aggregates in other grouping sets.
            # We find all combinations of M2M fields and create a sub-call for grouping sets
            # that share that exact combination of M2M fields.

            # ['A', 'B', 'C'] => [('A', 'B', 'C'), ('A', 'B'), ('A', 'C'), ('B', 'C'), ('A',), ('B',), ('C',), ()]
            m2m_combinaisons = (
                groupby for i in range(len(many2many_groupby_specs), -1, -1)
                for groupby in itertools.combinations(many2many_groupby_specs, i)
            )

            grouping_sets_to_process = dict(enumerate(grouping_sets))
            batched_calls = []  # [([groupby, ...], [index_result, ...])]

            # Largest combinations first, so each grouping set is claimed by
            # the most specific M2M combination it fully contains.
            for m2m_comb in m2m_combinaisons:
                if not grouping_sets_to_process:
                    break
                sub_grouping_sets = []
                sub_result_indexes = []
                for i, groupby in list(grouping_sets_to_process.items()):
                    if all(m2m in groupby for m2m in m2m_comb):
                        sub_grouping_sets.append(groupby)
                        sub_result_indexes.append(i)
                        grouping_sets_to_process.pop(i)

                if sub_grouping_sets:
                    batched_calls.append((sub_result_indexes, sub_grouping_sets))

            # The empty combination matches everything, so nothing can be left over.
            assert not grouping_sets_to_process
            # If the problem was decomposed, make recursive calls and assemble results.
            if len(batched_calls) > 1:
                for indexes, sub_grouping_sets in batched_calls:
                    # Keep only the order parts that refer to specs present in
                    # this sub-call (or to aggregates); drop the others.
                    sub_order_parts = []
                    all_sub_groupby = {spec for groupby in sub_grouping_sets for spec in groupby}
                    for order_part in (order or '').split(','):
                        order_part = order_part.strip()
                        if not any(
                            order_part.startswith(spec)
                            for spec in all_groupby_specs if spec not in all_sub_groupby
                        ):
                            sub_order_parts.append(order_part)

                    sub_results = self._read_grouping_sets(
                        domain, sub_grouping_sets, aggregates=aggregates, order=",".join(sub_order_parts),
                    )
                    for index, subresult in zip(indexes, sub_results):
                        result[index] = subresult
                return result

        elif many2many_groupby_specs and '__count' in aggregates:
            # Efficiently handle '__count' with M2M fields by using a distinct count on 'id'
            # without making another _read_grouping_sets (this is the very common case).
            aggregates = tuple(
                aggregate if aggregate != '__count' else 'id:count_distinct'
                for aggregate in aggregates
            )
            if order:
                order = order.replace('__count', 'id:count_distinct')

        # --- SQL Query Construction ---
        groupby_terms: dict[str, SQL] = {
            spec: self._read_group_groupby(self._table, spec, query) for spec in all_groupby_specs
        }
        aggregates_terms: list[SQL] = [
            self._read_group_select(spec, query) for spec in aggregates
        ]
        if groupby_terms:
            # grouping_select_sql: GROUPING(a, b)
            grouping_select_sql = SQL("GROUPING(%s)", SQL(", ").join(unique(groupby_terms.values())))
        else:
            # GROUPING() is invalid SQL, so we use the 0 as literal
            grouping_select_sql = SQL("0")

        select_args = [grouping_select_sql, *groupby_terms.values(), *aggregates_terms]

        # _read_group_orderby may change groupby_terms then it is necessary to be call before
        query.order = self._read_group_orderby(order, groupby_terms, query)
        # GROUPING SET ((a, b), (a), ())
        grouping_sets_sql = [
            SQL("(%s)", SQL(", ").join(groupby_terms[groupby_spec] for groupby_spec in grouping_set))
            for grouping_set in grouping_sets
        ]
        query.groupby = SQL("GROUPING SETS (%s)", SQL(", ").join(unique(grouping_sets_sql)))

        # This handles the case where `order` adds columns that must also be in `GROUP BY`.
        # Rebuild the grouping sets to include these extra terms.

        # row_values: [(GROUPING(...), a1, b1, aggregates...), (GROUPING(...), a2, b2, aggregates...), ...]
        row_values = self.env.execute_query(query.select(*select_args))

        if not row_values:  # shortcut
            return result

        # --- Result Post-Processing ---
        # This is the core of the result dispatching logic. It uses the integer
        # returned by GROUPING() as a key to map each result row to the correct
        # grouping set defined by the user.
        aggregates_indexes = tuple(range(len(all_groupby_specs), len(all_groupby_specs) + len(aggregates)))

        # Map each possible GROUPING() bitmask to its corresponding result list and value extractor.
        # {GROUPING(...): (append_method, extractor_method)}
        mask_grouping_mapping = {}

        # Create a mapping from each unique SQL GROUP BY term to its bitmask value.
        # The terms are reversed to match the PostgreSQL logic where the bitmask was
        # calculated from right to left (LSB first).
        # See PostgreSQL Doc: https://www.postgresql.org/docs/17/functions-aggregate.html#Grouping-Operations
        mask_sql_mapping = {
            sql_groupby: 1 << i
            for i, sql_groupby in enumerate(unique(reversed(groupby_terms.values())))
        }

        mask_grouping_result_indexes = defaultdict(list)  # To manage "duplicated" groupby
        for result_index, groupby in enumerate(grouping_sets):
            # E.g. GROUPING SET ((a, b), (a), ())
            # GROUPING(a, b): a and b included = 0, a included = 1, b included = 2, none included = 3
            sql_terms = {groupby_terms[groupby_spec] for groupby_spec in groupby}
            groupby_mask = sum(
                mask for sql_term, mask in mask_sql_mapping.items()
                # each bit is 0 if the corresponding expression is included in the grouping criteria
                # of the grouping set generating the current result row, and 1 if it is not included.
                if sql_term not in sql_terms
            )

            mask_grouping_result_indexes[groupby_mask].append(result_index)
            if groupby_mask not in mask_grouping_mapping:
                mask_grouping_mapping[groupby_mask] = (
                    result[result_index].append,
                    # Extractor: pick this grouping set's columns (in spec order)
                    # followed by all aggregate columns.
                    itemgetter_tuple(list(itertools.chain(
                        (all_groupby_specs.index(groupby_spec) for groupby_spec in groupby),
                        aggregates_indexes,
                    ))),
                )

        # +1 skips the leading GROUPING() column.
        aggregates_start_index = len(all_groupby_specs) + 1
        # Transpose rows to columns for efficient, column-wise post-processing.
        columns = list(zip(*row_values))
        # The first column is the grouping mask
        dispatch_info = map(mask_grouping_mapping.__getitem__, columns[0])
        # Post-process values column by column
        columns = [
            *map(self._read_group_postprocess_groupby, all_groupby_specs, columns[1:aggregates_start_index]),
            *map(self._read_group_postprocess_aggregate, aggregates, columns[aggregates_start_index:]),
        ]

        # result: [
        #     [(a1, b1, <aggregates>), (a2, b2, <aggregates>), ...],
        #     [(a1, <aggregates>), (a2, <aggregates>), ...],
        #     [(<aggregates>)],
        # ]
        for (append_method, extractor), *row in zip(dispatch_info, *columns, strict=True):
            append_method(extractor(row))

        # Manage groupbys targetting the same column(s), then having the same results
        for duplicate_groups_indexes in mask_grouping_result_indexes.values():
            if len(duplicate_groups_indexes) < 2:
                continue
            # The first index's result is the source for all others in this group
            source_result_group = result[duplicate_groups_indexes[0]]
            for duplicate_group_index in duplicate_groups_indexes[1:]:
                result[duplicate_group_index] = source_result_group[:]

        return result
    @api.model
    def _read_group(
        self,
        domain: DomainType,
        groupby: Sequence[str] = (),
        aggregates: Sequence[str] = (),
        having: DomainType = (),
        offset: int = 0,
        limit: int | None = None,
        order: str | None = None,
    ) -> list[tuple]:
        """ Get fields aggregations specified by ``aggregates`` grouped by the given ``groupby``
        fields where record are filtered by the ``domain``.

        :param domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
                list to match all records.
        :param groupby: list of groupby descriptions by which the records will be grouped.
                A groupby description is either a field (then it will be grouped by that field)
                or a string `'field:granularity'`. Right now, the only supported granularities
                are `'day'`, `'week'`, `'month'`, `'quarter'` or `'year'`, and they only make sense for
                date/datetime fields.
                Additionally integer date parts are also supported:
                `'year_number'`, `'quarter_number'`, `'month_number'`, `'iso_week_number'`, `'day_of_year'`, `'day_of_month'`,
                'day_of_week', 'hour_number', 'minute_number' and 'second_number'.
        :param aggregates: list of aggregates specification.
                Each element is `'field:agg'` (aggregate field with aggregation function `'agg'`).
                The possible aggregation functions are the ones provided by
                `PostgreSQL <https://www.postgresql.org/docs/current/static/functions-aggregate.html>`_,
                `'count_distinct'` with the expected meaning and `'recordset'` to act like `'array_agg'`
                converted into a recordset.
        :param having: A domain where the valid "fields" are the aggregates.
        :param offset: optional number of groups to skip
        :param limit: optional max number of groups to return
        :param order: optional ``order by`` specification, for
                overriding the natural sort ordering of the groups,
                see also :meth:`~.search`.
        :return: list of tuples containing in the order the groups values and aggregates values (flatten):
                `[(groupby_1_value, ... , aggregate_1_value_aggregate, ...), ...]`.
                If group is related field, the value of it will be a recordset (with a correct prefetch set).

        :raise AccessError: if user is not allowed to access requested information
        """
        self.browse().check_access('read')

        query = self._search(domain)
        if query.is_empty():
            if not groupby:
                # when there is no group, postgresql always return a row
                return [tuple(
                    self._read_group_empty_value(spec)
                    for spec in itertools.chain(groupby, aggregates)
                )]
            return []

        query.limit = limit
        query.offset = offset

        # Resolve specs to SQL expressions; these calls may add joins to `query`.
        groupby_terms: dict[str, SQL] = {
            spec: self._read_group_groupby(self._table, spec, query)
            for spec in groupby
        }
        aggregates_terms: list[SQL] = [
            self._read_group_select(spec, query)
            for spec in aggregates
        ]
        select_args = [*[groupby_terms[spec] for spec in groupby], *aggregates_terms]
        if groupby_terms:
            # _read_group_orderby may mutate groupby_terms: call it before GROUP BY.
            query.order = self._read_group_orderby(order, groupby_terms, query)
            query.groupby = SQL(", ").join(groupby_terms.values())
        query.having = self._read_group_having(list(having), query)

        # row_values: [(a1, b1, c1), (a2, b2, c2), ...]
        row_values = self.env.execute_query(query.select(*select_args))

        if not row_values:
            return row_values

        # post-process values column by column
        column_iterator = zip(*row_values)

        # column_result: [(a1, a2, ...), (b1, b2, ...), (c1, c2, ...)]
        column_result = []
        for spec in groupby:
            column = self._read_group_postprocess_groupby(spec, next(column_iterator))
            column_result.append(column)
        for spec in aggregates:
            column = self._read_group_postprocess_aggregate(spec, next(column_iterator))
            column_result.append(column)
        # Every SQL column must have been consumed exactly once.
        assert next(column_iterator, None) is None

        # return [(a1, b1, c1), (a2, b2, c2), ...]
        return list(zip(*column_result))
    def _read_group_select(self, aggregate_spec: str, query: Query) -> SQL:
        """ Return <SQL expression> corresponding to the given aggregation.

        The method also checks whether the fields used in the aggregate are
        accessible for reading.

        :param aggregate_spec: ``'__count'`` or ``'field:agg'`` where ``agg`` is
            a PostgreSQL aggregate, ``'count_distinct'``, ``'recordset'`` or
            ``'sum_currency'``
        :param query: the query being built; a LEFT JOIN may be added to it
            (currency-rate join for ``'sum_currency'``)
        :raise ValueError: if the spec is malformed or the aggregate unsupported
        """
        if aggregate_spec == '__count':
            return SQL("COUNT(*)")

        fname, property_name, func = parse_read_group_spec(aggregate_spec)

        if property_name:
            raise ValueError(f"Invalid {aggregate_spec!r}, this dot notation is not supported")

        if fname not in self._fields:
            raise ValueError(f"Invalid field {fname!r} on model {self._name!r} for {aggregate_spec!r}.")
        if not func:
            raise ValueError(f"Aggregate method is mandatory for {fname!r}")

        field = self._fields[fname]
        if func == 'sum_currency':
            if field.type != 'monetary':
                raise ValueError(f'Aggregator "sum_currency" only works on currency field for {fname!r}')

            # Subquery selecting, for each currency, the single most relevant rate
            # among rates of the root company or shared (company-less) ones:
            # the latest rate dated on/before today, otherwise the earliest future one.
            CurrencyRate = self.env['res.currency.rate']
            rate_subquery_table = SQL(
                """(SELECT DISTINCT ON (%(currency_field_sql)s) %(currency_field_sql)s, %(rate_field_sql)s
                    FROM "res_currency_rate"
                    WHERE %(company_field_sql)s IS NULL OR %(company_field_sql)s = %(company_id)s
                    ORDER BY
                        %(currency_field_sql)s,
                        %(company_field_sql)s,
                        CASE WHEN %(name_field_sql)s <= %(today)s THEN %(name_field_sql)s END DESC,
                        CASE WHEN %(name_field_sql)s > %(today)s THEN %(name_field_sql)s END ASC)
                """,
                currency_field_sql=CurrencyRate._field_to_sql(CurrencyRate._table, 'currency_id'),
                rate_field_sql=CurrencyRate._field_to_sql(CurrencyRate._table, 'rate'),
                company_field_sql=CurrencyRate._field_to_sql(CurrencyRate._table, 'company_id'),
                company_id=self.env.company.root_id.id,
                name_field_sql=CurrencyRate._field_to_sql(CurrencyRate._table, 'name'),
                today=Date.context_today(self),
            )
            currency_field_name = field.get_currency_field(self)
            alias_rate = query.make_alias(self._table, f'{currency_field_name}__rates')
            currency_field_sql = self._field_to_sql(self._table, currency_field_name, query)
            condition = SQL("%s = %s", currency_field_sql, SQL.identifier(alias_rate, "currency_id"))
            query.add_join('LEFT JOIN', alias_rate, rate_subquery_table, condition)

            # Sum the amounts divided by the matched rate (1.0 when no rate found).
            return SQL(
                "SUM(%s / COALESCE(%s, 1.0))",
                self._field_to_sql(self._table, fname, query),
                SQL.identifier(alias_rate, "rate"),
            )

        if func not in READ_GROUP_AGGREGATE:
            raise ValueError(f"Invalid aggregate method {func!r} for {aggregate_spec!r}.")

        if func == 'recordset' and not (field.relational or fname == 'id'):
            raise ValueError(f"Aggregate method {func!r} can be only used on relational field (or id) (for {aggregate_spec!r}).")

        sql_field = self._field_to_sql(self._table, fname, query)
        return READ_GROUP_AGGREGATE[func](self._table, sql_field)
    def _read_group_groupby(self, alias: str, groupby_spec: str, query: Query) -> SQL:
        """ Return <SQL expression> corresponding to the given groupby element.

        The method also checks whether the fields used in the groupby are
        accessible for reading.

        :param alias: table alias on which the spec is resolved
        :param groupby_spec: ``'field'``, ``'field:granularity'`` or a many2one
            dot-path (e.g. ``'partner_id.country_id'``)
        :param query: the query being built; LEFT JOINs may be added to it
        :raise ValueError: if the spec is invalid for this model
        """
        fname, seq_fnames, granularity = parse_read_group_spec(groupby_spec)
        if fname not in self._fields:
            raise ValueError(f"Invalid field {fname!r} on model {self._name!r}")

        field = self._fields[fname]

        if field.type == 'properties':
            sql_expr = self._read_group_groupby_properties(alias, field, seq_fnames, query)

        elif seq_fnames:
            # Dot notation: join to the comodel, then recurse on the rest of the path.
            if field.type != 'many2one':
                raise ValueError(f"Only many2one path is accepted for the {groupby_spec!r} groupby spec")

            comodel = self.env[field.comodel_name]
            coquery = comodel.with_context(active_test=False)._search([])
            if self.env.su or not coquery.where_clause:
                coalias = query.make_alias(alias, fname)
            else:
                # NOTE(review): the comodel query has a where clause for this
                # non-superuser; the uid is baked into the alias, presumably so
                # that joins with user-dependent conditions are not shared
                # between users — confirm against make_alias/caching behavior.
                coalias = query.make_alias(alias, f"{fname}__{self.env.uid}")
            condition = SQL(
                "%s = %s",
                self._field_to_sql(alias, fname, query),
                SQL.identifier(coalias, 'id'),
            )
            if coquery.where_clause:
                subselect_arg = SQL('%s.*', SQL.identifier(comodel._table))
                query.add_join('LEFT JOIN', coalias, coquery.subselect(subselect_arg), condition)
            else:
                query.add_join('LEFT JOIN', coalias, comodel._table, condition)
            return comodel._read_group_groupby(coalias, f"{seq_fnames}:{granularity}" if granularity else seq_fnames, query)

        elif granularity and field.type not in ('datetime', 'date', 'properties'):
            raise ValueError(f"Granularity set on a no-datetime field or property: {groupby_spec!r}")

        elif field.type == 'many2many':
            if field.related and not field.store:
                # Resolve the related chain down to a stored many2many.
                _model, field, alias = self._traverse_related_sql(alias, field, query)

            if not field.store:
                raise ValueError(f"Group by non-stored many2many field: {groupby_spec!r}")

            # special case for many2many fields: prepare a query on the comodel
            # and inject the query as an extra condition of the left join
            codomain = field.get_comodel_domain(self)
            comodel = self.env[field.comodel_name].with_context(**field.context)
            coquery = comodel._search(codomain, bypass_access=field.bypass_search_access)
            # LEFT JOIN {field.relation} AS rel_alias ON
            #     alias.id = rel_alias.{field.column1}
            #     AND rel_alias.{field.column2} IN ({coquery})
            rel_alias = query.make_alias(alias, field.name)
            condition = SQL(
                "%s = %s",
                SQL.identifier(alias, 'id'),
                SQL.identifier(rel_alias, field.column1),
            )
            if coquery.where_clause:
                condition = SQL(
                    "%s AND %s IN %s",
                    condition,
                    SQL.identifier(rel_alias, field.column2),
                    coquery.subselect(),
                )
            query.add_join("LEFT JOIN", rel_alias, field.relation, condition)
            # Group directly on the relation table's comodel column.
            return SQL.identifier(rel_alias, field.column2)

        else:
            sql_expr = self._field_to_sql(alias, fname, query)

        if field.type in ('datetime', 'date') or (field.type == 'properties' and granularity):
            if not granularity:
                raise ValueError(f"Granularity not set on a date(time) field: {groupby_spec!r}")
            if granularity not in READ_GROUP_ALL_TIME_GRANULARITY:
                raise ValueError(f"Granularity specification isn't correct: {granularity!r}")

            if granularity in READ_GROUP_NUMBER_GRANULARITY:
                # Integer date part (e.g. 'month_number'): delegate to the field.
                sql_expr = field.property_to_sql(sql_expr, granularity, self, alias, query)
            elif field.type == 'datetime':
                # set the timezone only
                sql_expr = field.property_to_sql(sql_expr, 'tz', self, alias, query)

            if granularity == 'week':
                # first_week_day: 0=Monday, 1=Tuesday, ...
                first_week_day = int(get_lang(self.env).week_start) - 1
                days_offset = first_week_day and 7 - first_week_day
                interval = f"-{days_offset} DAY"
                # Shift before truncating so that weeks start on the language's
                # first week day, then shift back.
                sql_expr = SQL(
                    "(date_trunc('week', %s::timestamp - INTERVAL %s) + INTERVAL %s)",
                    sql_expr, interval, interval,
                )
            elif granularity in READ_GROUP_TIME_GRANULARITY:
                sql_expr = SQL("date_trunc(%s, %s::timestamp)", granularity, sql_expr)

            # If the granularity is a part number, the result is a number (double) so no conversion is needed
            if field.type == 'date' and granularity not in READ_GROUP_NUMBER_GRANULARITY:
                # If the granularity uses date_trunc, we need to convert the timestamp back to a date.
                sql_expr = SQL("%s::date", sql_expr)

        elif field.type == 'boolean':
            # Group SQL NULL together with FALSE.
            sql_expr = SQL("COALESCE(%s, FALSE)", sql_expr)

        return sql_expr
2121 def _read_group_having(self, having_domain: list, query: Query) -> SQL:
2122 """ Return <SQL expression> corresponding to the having domain.
2123 """
2124 if not having_domain:
2125 return SQL()
2127 stack: list[SQL] = []
2128 SUPPORTED = ('in', 'not in', '<', '>', '<=', '>=', '=', '!=')
2129 for item in reversed(having_domain):
2130 if item == '!': 2130 ↛ 2131line 2130 didn't jump to line 2131 because the condition on line 2130 was never true
2131 stack.append(SQL("(NOT %s)", stack.pop()))
2132 elif item == '&': 2132 ↛ 2133line 2132 didn't jump to line 2133 because the condition on line 2132 was never true
2133 stack.append(SQL("(%s AND %s)", stack.pop(), stack.pop()))
2134 elif item == '|': 2134 ↛ 2135line 2134 didn't jump to line 2135 because the condition on line 2134 was never true
2135 stack.append(SQL("(%s OR %s)", stack.pop(), stack.pop()))
2136 elif isinstance(item, (list, tuple)) and len(item) == 3: 2136 ↛ 2143line 2136 didn't jump to line 2143 because the condition on line 2136 was always true
2137 left, operator, right = item
2138 if operator not in SUPPORTED: 2138 ↛ 2139line 2138 didn't jump to line 2139 because the condition on line 2138 was never true
2139 raise ValueError(f"Invalid having clause {item!r}: supported comparators are {SUPPORTED}")
2140 sql_left = self._read_group_select(left, query)
2141 stack.append(SQL("%s%s%s", sql_left, SQL_OPERATORS[operator], right))
2142 else:
2143 raise ValueError(f"Invalid having clause {item!r}: it should be a domain-like clause")
2145 while len(stack) > 1: 2145 ↛ 2146line 2145 didn't jump to line 2146 because the condition on line 2145 was never true
2146 stack.append(SQL("(%s AND %s)", stack.pop(), stack.pop()))
2148 return stack[0]
    def _read_group_orderby(self, order: str, groupby_terms: dict[str, SQL],
                            query: Query) -> SQL:
        """ Return <SQL expression> corresponding to the given order and groupby terms.

        Note: this method may change groupby_terms

        :param order: the order specification
        :param groupby_terms: the group by terms mapping ({spec: sql_expression})
        :param query: The query we are building
        """
        if order:
            traverse_many2one = True
        else:
            # Default order: the groupby specs themselves, in order.
            order = ','.join(groupby_terms)
            traverse_many2one = False

        if not order:
            return SQL()

        orderby_terms = []

        for order_part in order.split(','):
            order_match = regex_order_part_read_group.fullmatch(order_part)
            if not order_match:
                raise ValueError(f"Invalid order {order!r} for _read_group()")
            term = order_match['term']
            direction = (order_match['direction'] or 'ASC').upper()
            nulls = (order_match['nulls'] or '').upper()

            sql_direction = SQL(direction) if direction in ('ASC', 'DESC') else SQL()
            sql_nulls = SQL(nulls) if nulls in ('NULLS FIRST', 'NULLS LAST') else SQL()

            if term not in groupby_terms:
                # Not a groupby spec: it must be a valid aggregate spec.
                try:
                    sql_expr = self._read_group_select(term, query)
                except ValueError as e:
                    raise ValueError(f"Order term {order_part!r} is not a valid aggregate nor valid groupby") from e
                orderby_terms.append(SQL("%s %s %s", sql_expr, sql_direction, sql_nulls))
                continue

            field = self._fields.get(term)
            __, __, granularity = parse_read_group_spec(term)
            if (
                traverse_many2one and field and field.type == 'many2one'
                and self.env[field.comodel_name]._order != 'id'
            ):
                # Order by the comodel's own _order instead of the raw id;
                # the extra order columns must also be added to GROUP BY.
                if sql_order := self._order_to_sql(f'{term} {direction} {nulls}', query):
                    orderby_terms.append(sql_order)
                if query._order_groupby:
                    groupby_terms[term] = SQL(", ").join([groupby_terms[term], *query._order_groupby])
                    query._order_groupby.clear()

            elif granularity == 'day_of_week':
                """
                Day offset relative to the first day of week in the user lang
                formula: ((7 - first_week_day) + day_in_SQL) % 7

                       |   week starts on
                  SQL  |  mon   sun   sat
                       |   1  |  7  |  6   <-- first_week_day (in odoo)
                 ------|------------------
                 mon 1 |   0  |  1  |  2
                 tue 2 |   1  |  2  |  3
                 wed 3 |   2  |  3  |  4
                 thu 4 |   3  |  4  |  5
                 fri 5 |   4  |  5  |  6
                 sat 6 |   5  |  6  |  0
                 sun 0 |   6  |  0  |  1
                """
                first_week_day = int(get_lang(self.env).week_start)
                sql_expr = SQL("mod(7 - %s + %s::int, 7)", first_week_day, groupby_terms[term])
                orderby_terms.append(SQL("%s %s %s", sql_expr, sql_direction, sql_nulls))
            else:
                sql_expr = groupby_terms[term]
                orderby_terms.append(SQL("%s %s %s", sql_expr, sql_direction, sql_nulls))

        return SQL(", ").join(orderby_terms)
2229 @api.model
2230 def _read_group_empty_value(self, spec):
2231 """ Return the empty value corresponding to the given groupby spec or aggregate spec. """
2232 if spec == '__count':
2233 return 0
2234 fname, chain_fnames, func = parse_read_group_spec(spec) # func is either None, granularity or an aggregate
2235 if func in ('count', 'count_distinct'):
2236 return 0
2237 if func in ('array_agg', 'array_agg_distinct'):
2238 return []
2239 field = self._fields[fname]
2240 if (not func or func == 'recordset') and (field.relational or fname == 'id'):
2241 if chain_fnames and field.type == 'many2one': 2241 ↛ 2242line 2241 didn't jump to line 2242 because the condition on line 2241 was never true
2242 groupby_seq = f"{chain_fnames}:{func}" if func else chain_fnames
2243 model = self.env[field.comodel_name]
2244 return model._read_group_empty_value(groupby_seq)
2245 return self.env[field.comodel_name] if field.relational else self.env[self._name]
2246 return False
2248 def _read_group_postprocess_groupby(self, groupby_spec, raw_values):
2249 """ Convert the given values of ``groupby_spec``
2250 from PostgreSQL to the format returned by method ``_read_group()``.
2252 The formatting rules can be summarized as:
2253 - groupby values of relational fields are converted to recordsets with a correct prefetch set;
2254 - NULL values are converted to empty values corresponding to the given aggregate.
2255 """
2256 empty_value = self._read_group_empty_value(groupby_spec)
2258 fname, chain_fnames, granularity = parse_read_group_spec(groupby_spec)
2259 field = self._fields[fname]
2261 if field.relational or fname == 'id':
2262 if chain_fnames and field.relational: 2262 ↛ 2263line 2262 didn't jump to line 2263 because the condition on line 2262 was never true
2263 groupby_seq = f"{chain_fnames}:{granularity}" if granularity else chain_fnames
2264 model = self.env[field.comodel_name]
2265 return model._read_group_postprocess_groupby(groupby_seq, raw_values)
2267 Model = self.pool[field.comodel_name] if field.relational else self.pool[self._name]
2268 prefetch_ids = tuple(raw_value for raw_value in raw_values if raw_value)
2270 def recordset(value):
2271 return Model(self.env, (value,), prefetch_ids) if value else empty_value
2273 return (recordset(value) for value in raw_values)
2275 return ((value if value is not None else empty_value) for value in raw_values)
2277 def _read_group_postprocess_aggregate(self, aggregate_spec, raw_values):
2278 """ Convert the given values of ``aggregate_spec``
2279 from PostgreSQL to the format returned by method ``_read_group()``.
2281 The formatting rules can be summarized as:
2282 - 'recordset' aggregates are turned into recordsets with a correct prefetch set;
2283 - NULL values are converted to empty values corresponding to the given aggregate.
2284 """
2285 empty_value = self._read_group_empty_value(aggregate_spec)
2287 if aggregate_spec == '__count':
2288 return ((value if value is not None else empty_value) for value in raw_values)
2290 fname, __, func = parse_read_group_spec(aggregate_spec)
2291 if func == 'recordset':
2292 field = self._fields[fname]
2293 Model = self.pool[field.comodel_name] if field.relational else self.pool[self._name]
2294 prefetch_ids = tuple(unique(
2295 id_
2296 for array_values in raw_values if array_values
2297 for id_ in array_values if id_
2298 ))
2300 def recordset(value):
2301 if not value:
2302 return empty_value
2303 ids = tuple(unique(id_ for id_ in value if id_))
2304 return Model(self.env, ids, prefetch_ids)
2306 return (recordset(value) for value in raw_values)
2308 return ((value if value is not None else empty_value) for value in raw_values)
2310 @api.model
2311 def _read_group_expand_full(self, groups: ModelType, domain: DomainType) -> ModelType:
2312 """Extend the group to include all target records by default."""
2313 return groups.search([])
2315 @api.model
2316 def _read_group_fill_results(self, domain, groupby, annoted_aggregates, read_group_result, read_group_order=None):
2317 """Helper method for filling in empty groups for all possible values of
2318 the field being grouped by"""
2319 field_name = groupby.split('.')[0].split(':')[0]
2320 field = self._fields[field_name]
2321 if not field or not field.group_expand:
2322 return read_group_result
2324 # field.group_expand is a callable or the name of a method, that returns
2325 # the groups that we want to display for this field, in the form of a
2326 # recordset or a list of values (depending on the type of the field).
2327 # This is useful to implement kanban views for instance, where some
2328 # columns should be displayed even if they don't contain any record.
2329 group_expand = field.group_expand
2330 if isinstance(group_expand, str):
2331 group_expand = getattr(self.env.registry[self._name], group_expand)
2332 assert callable(group_expand)
2334 # determine all groups that should be returned
2335 values = [line[groupby] for line in read_group_result if line[groupby]]
2337 if field.relational:
2338 # groups is a recordset; determine order on groups's model
2339 groups = self.env[field.comodel_name].browse(value.id for value in values)
2340 values = group_expand(self, groups, domain).sudo()
2341 if read_group_order == groupby + ' desc':
2342 values.browse(reversed(values._ids))
2343 value2key = lambda value: value and value.id
2345 else:
2346 # groups is a list of values
2347 values = group_expand(self, values, domain)
2348 if read_group_order == groupby + ' desc':
2349 values.reverse()
2350 value2key = lambda value: value
2352 # Merge the current results (list of dicts) with all groups. Determine
2353 # the global order of results groups, which is supposed to be in the
2354 # same order as read_group_result (in the case of a many2one field).
2356 read_group_result_as_dict = {}
2357 for line in read_group_result:
2358 read_group_result_as_dict[value2key(line[groupby])] = line
2360 empty_item = {
2361 name: self._read_group_empty_value(spec)
2362 for name, spec in annoted_aggregates.items()
2363 }
2365 result = {}
2366 # fill result with the values order
2367 for value in values:
2368 key = value2key(value)
2369 if key in read_group_result_as_dict:
2370 result[key] = read_group_result_as_dict.pop(key)
2371 else:
2372 result[key] = dict(empty_item, **{groupby: value})
2374 for line in read_group_result_as_dict.values():
2375 key = value2key(line[groupby])
2376 result[key] = line
2378 # add folding information if present
2379 if field.relational and groups._fold_name in groups._fields:
2380 fold = {group.id: group[groups._fold_name]
2381 for group in groups.browse(key for key in result if key)}
2382 for key, line in result.items():
2383 line['__fold'] = fold.get(key, False)
2385 return list(result.values())
    @api.model
    def _read_group_fill_temporal(self, data, groupby, annoted_aggregates,
                                  fill_from=False, fill_to=False, min_groups=False):
        """Helper method for filling date/datetime 'holes' in a result set.

        We are in a use case where data are grouped by a date field (typically
        months but it could be any other interval) and displayed in a chart.

        Assume we group records by month, and we only have data for June,
        September and December. By default, plotting the result gives something
        like::

                                                ___
                                      ___      |   |
                                     |   | ___ |   |
                                     |___||___||___|
                                      Jun  Sep  Dec

        The problem is that December data immediately follow September data,
        which is misleading for the user. Adding explicit zeroes for missing
        data gives something like::

                                                           ___
                                 ___                      |   |
                                |   |           ___       |   |
                                |___| ___  ___ |___|  ___ |___|
                                 Jun  Jul  Aug  Sep  Oct  Nov  Dec

        To customize this output, the context key "fill_temporal" can be used
        under its dictionary format, which has 3 attributes : fill_from,
        fill_to, min_groups (see params of this function)

        Fill between bounds:
        Using either `fill_from` and/or `fill_to` attributes, we can further
        specify that at least a certain date range should be returned as
        contiguous groups. Any group outside those bounds will not be removed,
        but the filling will only occur between the specified bounds. When not
        specified, existing groups will be used as bounds, if applicable.
        By specifying such bounds, we can get empty groups before/after any
        group with data.

        If we want to fill groups only between August (fill_from)
        and October (fill_to)::

                                                     ___
                                 ___                |   |
                                |   |      ___      |   |
                                |___| ___ |___| ___ |___|
                                 Jun  Aug  Sep  Oct  Dec

        We still get June and December. To filter them out, we should match
        `fill_from` and `fill_to` with the domain e.g. ``['&',
        ('date_field', '>=', 'YYYY-08-01'), ('date_field', '<', 'YYYY-11-01')]``::

                                         ___
                                    ___ |___| ___
                                    Aug  Sep  Oct

        Minimal filling amount:
        Using `min_groups`, we can specify that we want at least that amount of
        contiguous groups. This amount is guaranteed to be provided from
        `fill_from` if specified, or from the lowest existing group otherwise.
        This amount is not restricted by `fill_to`. If there is an existing
        group before `fill_from`, `fill_from` is still used as the starting
        group for min_groups, because the filling does not apply on that
        existing group. If neither `fill_from` nor `fill_to` is specified, and
        there is no existing group, no group will be returned.

        If we set min_groups = 4::

                                         ___
                                    ___ |___| ___ ___
                                    Aug  Sep  Oct Nov

        :param list data: the data containing groups
        :param list groupby: list of fields being grouped on
        :param dict annoted_aggregates: dict of "<key_name>:<aggregate specification>"
        :param str fill_from: (inclusive) string representation of a
            date/datetime, start bound of the fill_temporal range
            formats: date -> %Y-%m-%d, datetime -> %Y-%m-%d %H:%M:%S
        :param str fill_to: (inclusive) string representation of a
            date/datetime, end bound of the fill_temporal range
            formats: date -> %Y-%m-%d, datetime -> %Y-%m-%d %H:%M:%S
        :param int min_groups: minimal amount of required groups for the
            fill_temporal range (should be >= 1)
        :rtype: list
        :return: list
        """
        # TODO: remove min_groups
        first_group = groupby[0]
        field_name = first_group.split(':')[0].split(".")[0]
        field = self._fields[field_name]
        # only temporal fields (or temporal properties, spelled "field.prop:gran")
        # can be filled; anything else is returned untouched
        if field.type not in ('date', 'datetime') and not (field.type == 'properties' and ':' in first_group):
            return data

        granularity = first_group.split(':')[1] if ':' in first_group else 'month'
        days_offset = 0
        if granularity == 'week':
            # _read_group_process_groupby week groups are dependent on the
            # locale, so filled groups should be too to avoid overlaps.
            first_week_day = int(get_lang(self.env).week_start) - 1
            days_offset = first_week_day and 7 - first_week_day
        interval = READ_GROUP_TIME_GRANULARITY[granularity]
        tz = False
        if field.type == 'datetime' and self.env.context.get('tz') in pytz.all_timezones_set:
            tz = pytz.timezone(self.env.context['tz'])

        # TODO: refactor remaining lines here

        # existing non null datetimes
        existing = [d[first_group] for d in data if d[first_group]] or [None]
        # assumption: existing data is sorted by field 'groupby_name'
        existing_from, existing_to = existing[0], existing[-1]
        if fill_from:
            # normalize the bound, snap it to the start of its granularity
            # period, then localize it so it compares with the group values
            fill_from = Datetime.to_datetime(fill_from) if isinstance(fill_from, datetime.datetime) else Date.to_date(fill_from)
            fill_from = date_utils.start_of(fill_from, granularity) - datetime.timedelta(days=days_offset)
            if tz:
                fill_from = tz.localize(fill_from)
        elif existing_from:
            fill_from = existing_from
        if fill_to:
            fill_to = Datetime.to_datetime(fill_to) if isinstance(fill_to, datetime.datetime) else Date.to_date(fill_to)
            fill_to = date_utils.start_of(fill_to, granularity) - datetime.timedelta(days=days_offset)
            if tz:
                fill_to = tz.localize(fill_to)
        elif existing_to:
            fill_to = existing_to

        # if only one bound is known, collapse the range onto it
        if not fill_to and fill_from:
            fill_to = fill_from
        if not fill_from and fill_to:
            fill_from = fill_to
        if not fill_from and not fill_to:
            return data

        if min_groups > 0:
            # stretch the upper bound to guarantee min_groups contiguous groups
            fill_to = max(fill_to, fill_from + (min_groups - 1) * interval)

        if fill_to < fill_from:
            return data

        required_dates = date_utils.date_range(fill_from, fill_to, interval)

        if existing[0] is None:
            existing = list(required_dates)
        else:
            existing = sorted(set().union(existing, required_dates))

        # template line for groups without data
        empty_item = {
            name: self._read_group_empty_value(spec)
            for name, spec in annoted_aggregates.items()
        }
        for group in groupby[1:]:
            empty_item[group] = self._read_group_empty_value(group)

        grouped_data = collections.defaultdict(list)
        for d in data:
            grouped_data[d[first_group]].append(d)

        result = []
        for dt in existing:
            result.extend(grouped_data[dt] or [dict(empty_item, **{first_group: dt})])

        # the group of records with no date value, if any, goes last
        if False in grouped_data:
            result.extend(grouped_data[False])

        return result
    @api.model
    def _read_group_format_result(self, rows_dict, lazy_groupby):
        """
        Helper method to format the data contained in the dictionary data by
        adding the domain corresponding to its values, the groupbys in the
        context and by properly formatting the date/datetime values.

        :param list rows_dict: group dicts as built by :meth:`read_group`;
            mutated in place (each row's ``__domain`` is narrowed and, for
            temporal groups, a ``__range`` entry and display label are set)
        :param list lazy_groupby: groupby specifications to format
        """
        for group in lazy_groupby:
            field_name = group.split(':')[0].split('.')[0]
            field = self._fields[field_name]

            if field.type in ('date', 'datetime'):
                granularity = group.split(':')[1] if ':' in group else 'month'
                if granularity in READ_GROUP_TIME_GRANULARITY:
                    locale = get_lang(self.env).code
                    fmt = DEFAULT_SERVER_DATETIME_FORMAT if field.type == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
                    interval = READ_GROUP_TIME_GRANULARITY[granularity]
            elif field.type == "properties":
                # properties have their own dedicated formatting
                self._read_group_format_result_properties(rows_dict, group)
                continue

            for row in rows_dict:
                value = row[group]

                # many2one values: expose (id, display_name) pairs
                if isinstance(value, BaseModel):
                    row[group] = (value.id, value.sudo().display_name) if value else False
                    value = value.id

                if not value and field.type == 'many2many':
                    additional_domain = [(field_name, 'not any', [])]
                else:
                    additional_domain = [(field_name, '=', value)]

                if field.type in ('date', 'datetime'):
                    if value and isinstance(value, (datetime.date, datetime.datetime)):
                        range_start = value
                        range_end = value + interval
                        if field.type == 'datetime':
                            tzinfo = None
                            if self.env.context.get('tz') in pytz.all_timezones_set:
                                tzinfo = pytz.timezone(self.env.context['tz'])
                                range_start = tzinfo.localize(range_start).astimezone(pytz.utc)
                                # take into account possible hour change between start and end
                                range_end = tzinfo.localize(range_end).astimezone(pytz.utc)

                            label = babel.dates.format_datetime(
                                range_start, format=READ_GROUP_DISPLAY_FORMAT[granularity],
                                tzinfo=tzinfo, locale=locale
                            )
                        else:
                            label = babel.dates.format_date(
                                value, format=READ_GROUP_DISPLAY_FORMAT[granularity],
                                locale=locale
                            )
                        # special case weeks because babel is broken *and*
                        # ubuntu reverted a change so it's also inconsistent
                        if granularity == 'week':
                            year, week = date_utils.weeknumber(
                                babel.Locale.parse(locale),
                                value,  # provide date or datetime without UTC conversion
                            )
                            label = f"W{week} {year:04}"

                        range_start = range_start.strftime(fmt)
                        range_end = range_end.strftime(fmt)
                        row[group] = label  # TODO should put raw data
                        row.setdefault('__range', {})[group] = {'from': range_start, 'to': range_end}
                        additional_domain = [
                            '&',
                            (field_name, '>=', range_start),
                            (field_name, '<', range_end),
                        ]
                    elif value is not None and granularity in READ_GROUP_NUMBER_GRANULARITY:
                        # integer date parts (e.g. month_number) match directly
                        additional_domain = [(f"{field_name}.{granularity}", '=', value)]
                    elif not value:
                        # Set the __range of the group containing records with an unset
                        # date/datetime field value to False.
                        row.setdefault('__range', {})[group] = False

                row['__domain'] &= Domain(additional_domain)
        # materialize the Domain objects into plain list-of-tuples domains
        for row in rows_dict:
            row['__domain'] = list(row['__domain'])
    def _read_group_format_result_properties(self, rows_dict, group):
        """Modify the final read group properties result.

        Replace the relational properties ids by a tuple with their display names,
        replace the "raw" tags and selection values by a list containing their labels.
        Adapt the domains for the Falsy group (we can't just keep (selection, =, False)
        e.g. because some values in database might correspond to option that have
        been remove on the parent).

        :param list rows_dict: group dicts, mutated in place
        :param str group: property groupby spec, ``"field.prop"`` or
            ``"field.prop:granularity"``
        :raises ValueError: if ``group`` does not target a specific property
        """
        if '.' not in group:
            raise ValueError('You must choose the property you want to group by.')
        fullname, __, func = group.partition(':')

        definition = self.get_property_definition(fullname)
        property_type = definition.get('type')

        if property_type == 'selection':
            options = definition.get('selection') or []
            options = tuple(option[0] for option in options)
            for row in rows_dict:
                if not row[fullname]:
                    # can not do ('selection', '=', False) because we might have
                    # option in database that does not exist anymore
                    additional_domain = Domain(fullname, '=', False) | Domain(fullname, 'not in', options)
                else:
                    additional_domain = Domain(fullname, '=', row[fullname])

                row['__domain'] &= additional_domain

        elif property_type == 'many2one':
            comodel = definition.get('comodel')
            prefetch_ids = tuple(row[fullname] for row in rows_dict if row[fullname])
            all_groups = tuple(row[fullname] for row in rows_dict if row[fullname])
            for row in rows_dict:
                if not row[fullname]:
                    # can not only do ('many2one', '=', False) because we might have
                    # record in database that does not exist anymore
                    additional_domain = Domain(fullname, '=', False) | Domain(fullname, 'not in', all_groups)
                else:
                    additional_domain = Domain(fullname, '=', row[fullname])
                    # replace the raw id by an (id, display_name) pair
                    record = self.env[comodel].browse(row[fullname]).with_prefetch(prefetch_ids)
                    row[fullname] = (row[fullname], record.display_name)

                row['__domain'] &= additional_domain

        elif property_type == 'many2many':
            comodel = definition.get('comodel')
            prefetch_ids = tuple(row[fullname] for row in rows_dict if row[fullname])
            all_groups = tuple(row[fullname] for row in rows_dict if row[fullname])
            for row in rows_dict:
                if not row[fullname]:
                    if all_groups:
                        additional_domain = Domain(fullname, '=', False) | Domain.AND([(fullname, 'not in', group)] for group in all_groups)
                    else:
                        additional_domain = Domain.TRUE
                else:
                    additional_domain = Domain(fullname, 'in', row[fullname])
                    # replace the raw id by an (id, display_name) pair
                    record = self.env[comodel].browse(row[fullname]).with_prefetch(prefetch_ids)
                    row[fullname] = (row[fullname], record.display_name)

                row['__domain'] &= additional_domain

        elif property_type == 'tags':
            tags = definition.get('tags') or []
            tags = {tag[0]: tag for tag in tags}
            for row in rows_dict:
                if not row[fullname]:
                    if tags:
                        additional_domain = Domain(fullname, '=', False) | Domain.AND([(fullname, 'not in', tag)] for tag in tags)
                    else:
                        additional_domain = Domain.TRUE
                else:
                    additional_domain = Domain(fullname, 'in', row[fullname])
                    # replace tag raw value with list of raw value, label and color
                    row[fullname] = tags.get(row[fullname])

                row['__domain'] &= additional_domain

        elif property_type in ('date', 'datetime'):
            for row in rows_dict:
                if not row[group]:
                    row[group] = False
                    row['__domain'] &= Domain(fullname, '=', False)
                    row['__range'] = {}
                    continue

                # Date / Datetime are not JSONifiable, so they are stored as raw text
                db_format = '%Y-%m-%d' if property_type == 'date' else '%Y-%m-%d %H:%M:%S'

                if func == 'week':
                    # the value is the first day of the week (based on local)
                    start = row[group].strftime(db_format)
                    end = (row[group] + datetime.timedelta(days=7)).strftime(db_format)
                else:
                    start = (date_utils.start_of(row[group], func)).strftime(db_format)
                    end = (date_utils.end_of(row[group], func) + datetime.timedelta(minutes=1)).strftime(db_format)

                row['__domain'] &= Domain(fullname, '>=', start) & Domain(fullname, '<', end)
                row['__range'] = {group: {'from': start, 'to': end}}
                row[group] = babel.dates.format_date(
                    row[group],
                    format=READ_GROUP_DISPLAY_FORMAT[func],
                    locale=get_lang(self.env).code
                )
        else:
            # other property types (char, integer, boolean, ...): plain equality
            for row in rows_dict:
                row['__domain'] &= Domain(fullname, '=', row[fullname])
2746 @api.model
2747 @api.readonly
2748 @api.deprecated("Since 19.0, read_group is deprecated. Please use _read_group in the backend code or formatted_read_group for a complete formatted result")
2749 def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
2750 """Deprecated - Get the list of records in list view grouped by the given ``groupby`` fields.
2752 :param list domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
2753 list to match all records.
2754 :param list fields: list of fields present in the list view specified on the object.
2755 Each element is either 'field' (field name, using the default aggregation),
2756 or 'field:agg' (aggregate field with aggregation function 'agg'),
2757 or 'name:agg(field)' (aggregate field with 'agg' and return it as 'name').
2758 The possible aggregation functions are the ones provided by
2759 `PostgreSQL <https://www.postgresql.org/docs/current/static/functions-aggregate.html>`_
2760 and 'count_distinct', with the expected meaning.
2761 :param list groupby: list of groupby descriptions by which the records will be grouped.
2762 A groupby description is either a field (then it will be grouped by that field).
2763 For the dates an datetime fields, you can specify a granularity using the syntax 'field:granularity'.
2764 The supported granularities are 'hour', 'day', 'week', 'month', 'quarter' or 'year';
2765 Read_group also supports integer date parts:
2766 'year_number', 'quarter_number', 'month_number' 'iso_week_number', 'day_of_year', 'day_of_month',
2767 'day_of_week', 'hour_number', 'minute_number' and 'second_number'.
2768 :param int offset: optional number of groups to skip
2769 :param int limit: optional max number of groups to return
2770 :param str orderby: optional ``order by`` specification, for
2771 overriding the natural sort ordering of the
2772 groups, see also :meth:`~.search`
2773 (supported only for many2one fields currently)
2774 :param bool lazy: if true, the results are only grouped by the first groupby and the
2775 remaining groupbys are put in the __context key. If false, all the groupbys are
2776 done in one call.
2777 :return: list of dictionaries(one dictionary for each record) containing:
2779 * the values of fields grouped by the fields in ``groupby`` argument
2780 * __domain: list of tuples specifying the search criteria
2781 * __context: dictionary with argument like ``groupby``
2782 * __range: (date/datetime only) dictionary with field_name:granularity as keys
2783 mapping to a dictionary with keys: "from" (inclusive) and "to" (exclusive)
2784 mapping to a string representation of the temporal bounds of the group
2785 :rtype: [{'field_name_1': value, ...}, ...]
2786 :raise AccessError: if user is not allowed to access requested information
2787 """
2788 groupby = [groupby] if isinstance(groupby, str) else groupby
2789 lazy_groupby = groupby[:1] if lazy else groupby
2791 # Compatibility layer with _read_group, it should be remove in the second part of the refactoring
2792 # - Modify `groupby` default value 'month' into specific groupby specification
2793 # - Modify `fields` into aggregates specification of _read_group
2794 # - Modify the order to be compatible with the _read_group specification
2795 groupby = [groupby] if isinstance(groupby, str) else groupby
2796 lazy_groupby = groupby[:1] if lazy else groupby
2798 annotated_groupby = {} # Key as the name in the result, value as the explicit groupby specification
2799 for group_spec in lazy_groupby:
2800 field_name, property_name, granularity = parse_read_group_spec(group_spec)
2801 if field_name not in self._fields:
2802 raise ValueError(f"Invalid field {field_name!r} on model {self._name!r}")
2803 field = self._fields[field_name]
2804 if property_name and field.type != 'properties':
2805 raise ValueError(f"Property name {property_name!r} has to be used on a property field.")
2806 if field.type in ('date', 'datetime'):
2807 annotated_groupby[group_spec] = f"{field_name}:{granularity or 'month'}"
2808 else:
2809 annotated_groupby[group_spec] = group_spec
2811 annotated_aggregates = { # Key as the name in the result, value as the explicit aggregate specification
2812 f"{lazy_groupby[0].split(':')[0]}_count" if lazy and len(lazy_groupby) == 1 else '__count': '__count',
2813 }
2814 for field_spec in fields:
2815 if field_spec == '__count':
2816 continue
2817 match = regex_field_agg.match(field_spec)
2818 if not match:
2819 raise ValueError(f"Invalid field specification {field_spec!r}.")
2820 name, func, fname = match.groups()
2822 if fname: # Manage this kind of specification : "field_min:min(field)"
2823 annotated_aggregates[name] = f"{fname}:{func}"
2824 continue
2825 if func: # Manage this kind of specification : "field:min"
2826 annotated_aggregates[name] = f"{name}:{func}"
2827 continue
2829 if name not in self._fields:
2830 raise ValueError(f"Invalid field {name!r} on model {self._name!r}")
2831 field = self._fields[name]
2832 if field.base_field.store and field.base_field.column_type and field.aggregator and field_spec not in annotated_groupby:
2833 annotated_aggregates[name] = f"{name}:{field.aggregator}"
2835 if orderby:
2836 new_terms = []
2837 for order_term in orderby.split(','):
2838 order_term = order_term.strip()
2839 for key_name, annotated in itertools.chain(reversed(annotated_groupby.items()), annotated_aggregates.items()):
2840 key_name = key_name.split(':')[0]
2841 if order_term.startswith(f'{key_name} ') or key_name == order_term:
2842 order_term = order_term.replace(key_name, annotated)
2843 break
2844 new_terms.append(order_term)
2845 orderby = ','.join(new_terms)
2846 else:
2847 orderby = ','.join(annotated_groupby.values())
2849 domain = Domain(domain)
2850 rows = self._read_group(domain, annotated_groupby.values(), annotated_aggregates.values(), offset=offset, limit=limit, order=orderby)
2851 rows_dict = [
2852 dict(zip(itertools.chain(annotated_groupby, annotated_aggregates), row))
2853 for row in rows
2854 ]
2856 fill_temporal = self.env.context.get('fill_temporal')
2857 if lazy_groupby and (rows_dict and fill_temporal) or isinstance(fill_temporal, dict):
2858 # fill_temporal = {} is equivalent to fill_temporal = True
2859 # if fill_temporal is a dictionary and there is no data, there is a chance that we
2860 # want to display empty columns anyway, so we should apply the fill_temporal logic
2861 if not isinstance(fill_temporal, dict):
2862 fill_temporal = {}
2863 # TODO Shouldn't be possible with a limit
2864 rows_dict = self._read_group_fill_temporal(
2865 rows_dict, lazy_groupby,
2866 annotated_aggregates, **fill_temporal,
2867 )
2869 if lazy_groupby and lazy:
2870 # Right now, read_group only fill results in lazy mode (by default).
2871 # If you need to have the empty groups in 'eager' mode, then the
2872 # method _read_group_fill_results need to be completely reimplemented
2873 # in a sane way
2874 # TODO Shouldn't be possible with a limit or the limit should be in account
2875 rows_dict = self._read_group_fill_results(
2876 domain, lazy_groupby[0],
2877 annotated_aggregates, rows_dict, read_group_order=orderby,
2878 )
2880 for row in rows_dict:
2881 row['__domain'] = domain
2882 if len(lazy_groupby) < len(groupby):
2883 row['__context'] = {'group_by': groupby[len(lazy_groupby):]}
2885 self._read_group_format_result(rows_dict, lazy_groupby)
2887 return rows_dict
2889 def _traverse_related_sql(self, alias: str, field: Field, query: Query) -> tuple[BaseModel, Field, str]:
2890 """ Traverse the related `field` and add needed join to the `query`.
2892 :returns: tuple ``(model, field, alias)``, where ``field`` is the last
2893 field in the sequence, ``model`` is that field's model, and
2894 ``alias`` is the model's table alias
2895 """
2896 assert field.related and not field.store
2897 if not (self.env.su or field.compute_sudo or field.inherited): 2897 ↛ 2898line 2897 didn't jump to line 2898 because the condition on line 2897 was never true
2898 raise ValueError(f'Cannot convert {field} to SQL because it is not a sudoed related or inherited field')
2900 model = self.sudo(self.env.su or field.compute_sudo)
2901 *path_fnames, last_fname = field.related.split('.')
2902 for path_fname in path_fnames:
2903 path_field = model._fields[path_fname]
2904 if path_field.type != 'many2one': 2904 ↛ 2905line 2904 didn't jump to line 2905 because the condition on line 2904 was never true
2905 raise ValueError(f'Cannot convert {field} (related={field.related}) to SQL because {path_fname} is not a Many2one')
2906 model, alias = path_field.join(model, alias, query)
2908 return model, model._fields[last_fname], alias
    def _field_to_sql(self, alias: str, field_expr: str, query: (Query | None) = None) -> SQL:
        """ Return an :class:`SQL` object that represents the value of the given
        field from the given table alias, in the context of the given query.
        The method also checks that the field is accessible for reading.

        The query object is necessary for inherited fields, many2one fields and
        properties fields, where joins are added to the query.

        :param alias: table alias the field is read from
        :param field_expr: field name, optionally with a property suffix
            (``"field.property"``)
        :param query: query to extend with joins when traversal is needed
        :raises ValueError: if ``field_expr`` names an unknown field
        """
        fname, property_name = parse_field_expr(field_expr)
        field = self._fields.get(fname)
        if not field:
            raise ValueError(f"Invalid field {fname!r} on model {self._name!r}")

        if field.related and not field.store:
            # non-stored related field: join through the chain, then recurse on
            # the target model for the final (possibly property) expression
            model, field, alias = self._traverse_related_sql(alias, field, query)
            related_expr = field.name if not property_name else f"{field.name}.{property_name}"
            return model._field_to_sql(alias, related_expr, query)

        self._check_field_access(field, 'read')

        sql = field.to_sql(self, alias)
        if property_name:
            sql = field.property_to_sql(sql, property_name, self, alias, query)
        return sql
    def _read_group_groupby_properties(self, alias: str, field: Field, property_name: str, query: Query) -> SQL:
        """ Return the SQL expression to group records by the given property.

        Depending on the property type, this may add joins to ``query``
        (tags/many2many/selection expand the JSON value with a lateral-style
        join) or wrap the JSON value in a validity-checking ``CASE``.

        :raises UserError: for invalid linked comodels or HTML properties
        """
        fname = field.name
        definition = self.get_property_definition(f"{fname}.{property_name}")
        property_type = definition.get('type')
        sql_property = self._field_to_sql(alias, f'{fname}.{property_name}', query)

        # JOIN on the JSON array
        if property_type in ('tags', 'many2many'):
            property_alias = query.make_alias(alias, f'{fname}_{property_name}')
            # guard: only unnest actual JSON arrays, anything else becomes []
            sql_property = SQL(
                """ CASE
                        WHEN jsonb_typeof(%(property)s) = 'array'
                        THEN %(property)s
                        ELSE '[]'::jsonb
                    END """,
                property=sql_property,
            )
            if property_type == 'tags':
                # ignore invalid tags
                tags = [tag[0] for tag in definition.get('tags') or []]
                # `->>0 : convert "JSON string" into string
                condition = SQL(
                    "%s->>0 = ANY(%s::text[])",
                    SQL.identifier(property_alias), tags,
                )
            else:
                comodel = self.env.get(definition.get('comodel'))
                if comodel is None or comodel._transient or comodel._abstract:
                    raise UserError(_(
                        "You cannot use “%(property_name)s” because the linked “%(model_name)s” model doesn't exist or is invalid",
                        property_name=definition.get('string', property_name), model_name=definition.get('comodel'),
                    ))

                # check the existences of the many2many
                condition = SQL(
                    "%s::int IN (SELECT id FROM %s)",
                    SQL.identifier(property_alias), SQL.identifier(comodel._table),
                )

            query.add_join(
                "LEFT JOIN",
                property_alias,
                SQL("jsonb_array_elements(%s)", sql_property),
                condition,
            )

            return SQL.identifier(property_alias)

        elif property_type == 'selection':
            options = [option[0] for option in definition.get('selection') or ()]

            # check the existence of the option
            property_alias = query.make_alias(alias, f'{fname}_{property_name}')
            query.add_join(
                "LEFT JOIN",
                property_alias,
                SQL("(SELECT unnest(%s::text[]) %s)", options, SQL.identifier(property_alias)),
                SQL("%s->>0 = %s", sql_property, SQL.identifier(property_alias)),
            )

            return SQL.identifier(property_alias)

        elif property_type == 'many2one':
            comodel = self.env.get(definition.get('comodel'))
            if comodel is None or comodel._transient or comodel._abstract:
                raise UserError(_(
                    "You cannot use “%(property_name)s” because the linked “%(model_name)s” model doesn't exist or is invalid",
                    property_name=definition.get('string', property_name), model_name=definition.get('comodel'),
                ))

            # keep the id only if it still points to an existing record
            return SQL(
                """ CASE
                        WHEN jsonb_typeof(%(property)s) = 'number'
                        AND (%(property)s)::int IN (SELECT id FROM %(table)s)
                        THEN %(property)s
                        ELSE NULL
                    END """,
                property=sql_property,
                table=SQL.identifier(comodel._table),
            )

        elif property_type == 'date':
            return SQL(
                """ CASE
                        WHEN jsonb_typeof(%(property)s) = 'string'
                        THEN (%(property)s->>0)::DATE
                        ELSE NULL
                    END """,
                property=sql_property,
            )

        elif property_type == 'datetime':
            return SQL(
                """ CASE
                        WHEN jsonb_typeof(%(property)s) = 'string'
                        THEN to_timestamp(%(property)s->>0, 'YYYY-MM-DD HH24:MI:SS')
                        ELSE NULL
                    END """,
                property=sql_property,
            )

        elif property_type == 'html':
            raise UserError(_('Grouping by HTML properties is not supported.'))

        # if the key is not present in the dict, fallback to false instead of none
        return SQL("COALESCE(%s, 'false')", sql_property)
    @api.model
    def get_property_definition(self, full_name: str) -> dict:
        """Return the definition of the given property.

        :param full_name: Name of the field / property
            (e.g. "property.integer")
        :return: the property definition dict found on the definition record,
            or an empty dict if no matching definition exists
        :raises ValueError: if the field part of ``full_name`` is unknown
        """
        self.browse().check_access("read")
        field_name, property_name = parse_field_expr(full_name)
        field = self._fields.get(field_name)
        if not field:
            raise ValueError(f"Invalid field {field_name!r} on model {self._name!r}")
        from .fields_properties import check_property_field_value_name  # noqa: PLC0415
        check_property_field_value_name(property_name)

        # the definitions live on a field of the "definition record" model
        target_model = self.env[self._fields[field.definition_record].comodel_name]
        field_definition = target_model._fields[field.definition_record_field]
        # look up the first definition whose 'name' matches the property
        result = self.env.execute_query_dict(SQL(
            """ SELECT definition
                FROM %(table)s, jsonb_array_elements(%(field)s) definition
                WHERE %(field)s IS NOT NULL AND definition->>'name' = %(name)s
                LIMIT 1 """,
            table=SQL.identifier(target_model._table),
            field=SQL.identifier(field.definition_record_field, to_flush=field_definition),
            name=property_name,
        ))
        return result[0]["definition"] if result else {}
    def _parent_store_compute(self) -> None:
        """ Compute parent_path field from scratch.

        Walks the parent hierarchy in a single recursive SQL query and updates
        every row's ``parent_path``, then invalidates the cached field.
        No-op on models without ``_parent_store``.
        """
        if not self._parent_store:
            return

        # Each record is associated to a string 'parent_path', that represents
        # the path from the record's root node to the record. The path is made
        # of the node ids suffixed with a slash (see example below). The nodes
        # in the subtree of record are the ones where 'parent_path' starts with
        # the 'parent_path' of record.
        #
        #               a                 node | id | parent_path
        #              / \                  a  | 42 | 42/
        #            ...  b                 b  | 63 | 42/63/
        #                / \                c  | 84 | 42/63/84/
        #               c   d               d  | 85 | 42/63/85/
        #
        # Note: the final '/' is necessary to match subtrees correctly: '42/63'
        # is a prefix of '42/630', but '42/63/' is not a prefix of '42/630/'.
        _logger.info('Computing parent_path for table %s...', self._table)
        query = SQL(
            """ WITH RECURSIVE __parent_store_compute(id, parent_path) AS (
                    SELECT row.id, concat(row.id, '/')
                    FROM %(table)s row
                    WHERE row.%(parent)s IS NULL
                UNION
                    SELECT row.id, concat(comp.parent_path, row.id, '/')
                    FROM %(table)s row, __parent_store_compute comp
                    WHERE row.%(parent)s = comp.id
                )
                UPDATE %(table)s row SET parent_path = comp.parent_path
                FROM __parent_store_compute comp
                WHERE row.id = comp.id """,
            table=SQL.identifier(self._table),
            parent=SQL.identifier(self._parent_name),
        )
        self.env.cr.execute(query)
        self.invalidate_model(['parent_path'])
3109 def _check_removed_columns(self, log=False):
3110 if self._abstract:
3111 return
3112 # iterate on the database columns to drop the NOT NULL constraints of
3113 # fields which were required but have been removed (or will be added by
3114 # another module)
3115 cr = self.env.cr
3116 cols = [name for name, field in self._fields.items()
3117 if field.store and field.column_type]
3118 cr.execute(SQL(
3119 """ SELECT a.attname, a.attnotnull
3120 FROM pg_class c, pg_attribute a
3121 WHERE c.relname=%s
3122 AND c.relnamespace = current_schema::regnamespace
3123 AND c.oid=a.attrelid
3124 AND a.attisdropped=%s
3125 AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')
3126 AND a.attname NOT IN %s """,
3127 self._table, False, tuple(cols),
3128 ))
3130 for row in cr.dictfetchall():
3131 if log:
3132 _logger.debug("column %s is in the table %s but not in the corresponding object %s",
3133 row['attname'], self._table, self._name)
3134 if row['attnotnull']:
3135 sql.drop_not_null(cr, self._table, row['attname'])
3137 def _init_column(self, column_name):
3138 """ Initialize the value of the given column for existing rows. """
3139 # get the default value; ideally, we should use default_get(), but it
3140 # fails due to ir.default not being ready
3141 field = self._fields[column_name]
3142 if field.default:
3143 value = field.default(self)
3144 value = field.convert_to_write(value, self)
3145 value = field.convert_to_column_insert(value, self)
3146 else:
3147 value = None
3148 # Write value if non-NULL, except for booleans for which False means
3149 # the same as NULL - this saves us an expensive query on large tables,
3150 # if the boolean is required we still write False to allow NOT NULL constraints.
3151 necessary = (value is not None) if field.type != 'boolean' or field.required else value
3152 if necessary:
3153 _logger.debug("Table '%s': setting default value of new column %s to %r",
3154 self._table, column_name, value)
3155 self.env.cr.execute(SQL(
3156 "UPDATE %(table)s SET %(field)s = %(value)s WHERE %(field)s IS NULL",
3157 table=SQL.identifier(self._table),
3158 field=SQL.identifier(column_name),
3159 value=value,
3160 ))
3162 @ormcache()
3163 def _table_has_rows(self) -> bool:
3164 """ Return whether the model's table has rows. This method should only
3165 be used when updating the database schema (:meth:`~._auto_init`).
3166 """
3167 self.env.cr.execute(SQL('SELECT 1 FROM %s LIMIT 1', SQL.identifier(self._table)))
3168 return bool(self.env.cr.rowcount)
    def _auto_init(self) -> None:
        """ Initialize the database schema of ``self``:
        - create the corresponding table,
        - create/update the necessary columns/tables for fields,
        - initialize new columns on existing rows,
        - add the SQL constraints given on the model,
        - add the indexes on indexed fields,

        Also prepare post-init stuff to:
        - add foreign key constraints,
        - reflect models, fields, relations and constraints,
        - mark fields to recompute on existing records.

        Note: you should not override this method. Instead, you can modify
        the model's database schema by overriding method :meth:`~.init`,
        which is called right after this one.
        """
        raise_on_invalid_object_name(self._name)

        # This prevents anything called by this method (in particular default
        # values) from prefetching a field for which the corresponding column
        # has not been added in database yet!
        self = self.with_context(prefetch_fields=False)

        cr = self.env.cr
        update_custom_fields = self.env.context.get('update_custom_fields', False)
        must_create_table = not sql.table_exists(cr, self._table)
        parent_path_compute = False

        if self._auto:
            if must_create_table:
                def make_type(field):
                    # column DDL type, with NOT NULL inlined for required fields
                    return field.column_type[1] + (" NOT NULL" if field.required else "")

                # create the table with all stored columns at once, ordered by
                # the fields' declared column_order
                sql.create_model_table(cr, self._table, self._description, [
                    (field.name, make_type(field), field.string)
                    for field in sorted(self._fields.values(), key=lambda f: f.column_order)
                    if field.name != 'id' and field.store and field.column_type
                ])

            if self._parent_store:
                if not sql.column_exists(cr, self._table, 'parent_path'):
                    sql.create_column(self.env.cr, self._table, 'parent_path', 'VARCHAR')
                    # column is new: schedule a full recomputation at the end
                    parent_path_compute = True
                self._check_parent_path()

            if not must_create_table:
                self._check_removed_columns(log=False)

            # update the database schema for fields
            columns = sql.table_columns(cr, self._table)
            fields_to_compute = []

            for field in sorted(self._fields.values(), key=lambda f: f.column_order):
                if not field.store:
                    continue
                if field.manual and not update_custom_fields:
                    continue            # don't update custom fields
                new = field.update_db(self, columns)
                if new and field.compute:
                    fields_to_compute.append(field)

            if fields_to_compute:
                # mark existing records for computation now, so that computed
                # required fields are flushed before the NOT NULL constraint is
                # added to the database
                cr.execute(SQL('SELECT id FROM %s', SQL.identifier(self._table)))
                records = self.browse(row[0] for row in cr.fetchall())
                if records:
                    for field in fields_to_compute:
                        _logger.info("Prepare computation of %s", field)
                        self.env.add_to_compute(field, records)

        if self._auto:
            self._add_sql_constraints()

        if parent_path_compute:
            self._parent_store_compute()
    @api.private
    def init(self) -> None:
        """ This method is called after :meth:`~._auto_init`, and may be
        overridden to create or modify a model's database schema.
        """
        # intentionally empty: hook for model-specific schema initialization
3255 def _check_parent_path(self):
3256 field = self._fields.get('parent_path')
3257 if field is None: 3257 ↛ 3258line 3257 didn't jump to line 3258 because the condition on line 3257 was never true
3258 _logger.error("add a field parent_path on model %r: `parent_path = fields.Char(index=True)`.", self._name)
3259 elif not field.index: 3259 ↛ 3260line 3259 didn't jump to line 3260 because the condition on line 3259 was never true
3260 _logger.error('parent_path field on model %r should be indexed! Add index=True to the field definition.', self._name)
3262 def _add_sql_constraints(self):
3263 """ Modify this model's database table objects so they match the one
3264 in _table_objects.
3265 """
3266 for obj in self._table_objects.values():
3267 obj.apply_to_database(self)
3269 @api.model
3270 def _sql_error_to_message(self, exc: psycopg2.Error) -> str:
3271 """ Convert a database exception to a user error message depending on the model.
3273 Note that the cursor on self has to be in a valid state.
3274 """
3275 if (constraint_name := exc.diag.constraint_name) and (cons := self._table_objects.get(constraint_name)):
3276 cons_rec = self.env['ir.model.constraint'].sudo().search_fetch([
3277 ('name', '=', constraint_name),
3278 ('model.model', '=', self._name),
3279 ], ['message'], limit=1)
3280 if message := cons_rec.message:
3281 return message
3282 # get the message from the object
3283 if message := cons.get_error_message(self, exc.diag):
3284 return message
3285 return self._sql_error_to_message_generic(exc)
    @api.model
    def _sql_error_to_message_generic(self, exc: psycopg2.Error) -> str:
        """ Convert a database exception to a generic user error message.

        Builds a human-readable message from the psycopg2 diagnostics,
        specialized for NOT NULL, foreign key and unique violations; any other
        error falls back to the raw exception text.
        """
        diag = exc.diag
        unknown = self.env._('Unknown')
        model_string = self.env['ir.model']._get(self._name).name or self._description
        info = {
            'model_display': f"'{model_string}' ({self._name})",
            'table_name': diag.table_name,
            'constraint_name': diag.constraint_name,
        }
        if self._table == diag.table_name:
            # the error concerns our own table: map columns back to fields
            columns = get_columns_from_sql_diagnostics(self.env.cr, diag, check_registry=True)
        else:
            # error on another table (e.g. an m2m relation): we cannot tell
            # which model it belongs to from here
            columns = get_columns_from_sql_diagnostics(self.env.cr, diag)
            info['model_display'] = unknown
        if not columns:
            info['field_display'] = unknown
        elif len(columns) == 1 and (field := self._fields.get(columns[0])):
            field_string = field._description_string(self.env)
            info['field_display'] = f"'{field_string}' ({field.name})"
        else:
            info['field_display'] = f"'{format_list(self.env, columns)}'"

        if isinstance(exc, psycopg2.errors.NotNullViolation):
            return self.env._(
                "Missing required value for the field %(field_display)s.\n"
                "Model: %(model_display)s\n"
                "- create/update: a mandatory field is not set\n"
                "- delete: another model requires the record being deleted, you can archive it instead\n",
                **info,
            )

        if isinstance(exc, psycopg2.errors.ForeignKeyViolation):
            if len(columns) != 1:
                # no single column to show: fall back to the constraint name
                info['field_display'] = info['constraint_name']
            return self.env._(
                "Another model is using the record you are trying to delete.\n\n"
                "The troublemaker is: %(model_display)s\n"
                "Thanks to the following constraint: %(field_display)s\n"
                "How about archiving the record instead?",
                **info,
            )

        if isinstance(exc, psycopg2.errors.UniqueViolation) and columns:
            column_names = [self._fields[f].string if f in self._fields else f for f in columns]
            info['field_display'] = f"'{', '.join(columns)}' ({format_list(self.env, column_names)})"
            info['detail'] = diag.message_detail  # contains conflicting key and value
            return self.env._("The value for %(field_display)s already exists.\n\nDetail: %(detail)s\n", **info)

        # No good message can be created for psycopg2.errors.CheckViolation

        # fallback
        return exception_to_unicode(exc)
3342 @api.model
3343 def fields_get(self, allfields: Collection[str] | None = None, attributes: Collection[str] | None = None) -> dict[str, ValuesType]:
3344 """ Return the definition of each field.
3346 The returned value is a dictionary (indexed by field name) of
3347 dictionaries. The _inherits'd fields are included. The string, help,
3348 and selection (if present) attributes are translated.
3350 :param allfields: fields to document, all if empty or not provided
3351 :param attributes: attributes to return for each field, all if empty or not provided
3352 :return: dictionary mapping field names to a dictionary mapping attributes to values.
3353 """
3354 res = {}
3355 for fname, field in self._fields.items():
3356 if allfields and fname not in allfields:
3357 continue
3358 if not self._has_field_access(field, 'read'): 3358 ↛ 3359line 3358 didn't jump to line 3359 because the condition on line 3358 was never true
3359 continue
3361 description = field.get_description(self.env, attributes=attributes)
3362 if 'readonly' in description:
3363 description['readonly'] = description['readonly'] or not self._has_field_access(field, 'write')
3364 res[fname] = description
3366 return res
3368 @api.model
3369 def _has_field_access(self, field: Field, operation: typing.Literal['read', 'write']) -> bool:
3370 """ Determine whether the user access rights on the given field for the given operation.
3371 You may override this method to customize the access to fields.
3373 :param field: the field to check
3374 :param operation: one of ``read``, ``write``
3375 :return: whether the field is accessible
3376 """
3377 if not field.groups or self.env.su: 3377 ↛ 3379line 3377 didn't jump to line 3379 because the condition on line 3377 was always true
3378 return True
3379 if field.groups == NO_ACCESS:
3380 return False
3381 return self.env.user.has_groups(field.groups)
    @api.model
    def _check_field_access(self, field: Field, operation: typing.Literal['read', 'write']) -> None:
        """Check the user access rights on the given field.

        :param field: the field to check
        :param operation: one of ``read``, ``write``
        :raise AccessError: if the user is not allowed to access the provided field
        """
        if self._has_field_access(field, operation):
            return

        _logger.info('Access Denied by ACLs for operation: %s, uid: %s, model: %s, field: %s',
                     operation, self.env.uid, self._name, field.name)

        description = self.env['ir.model']._get(self._name).name

        error_msg = _(
            "You do not have enough rights to access the field \"%(field)s\""
            " on %(document_kind)s (%(document_model)s). "
            "Please contact your system administrator."
            "\n\nOperation: %(operation)s",
            field=field.name,
            document_kind=description,
            document_model=self._name,
            operation=operation,
        )

        # users with the technical group get extra diagnostic details
        if self.env.user._has_group('base.group_no_one'):
            if field.groups == NO_ACCESS:
                allowed_groups_msg = _("always forbidden")
            elif not field.groups:
                # no groups on the field: access denied by an override of
                # _has_field_access rather than by group membership
                allowed_groups_msg = _("custom field access rules")
            else:
                groups_list = [self.env.ref(g) for g in field.groups.split(',')]
                groups = self.env['res.groups'].union(*groups_list).sorted('id')
                allowed_groups_msg = _(
                    "allowed for groups %s",
                    ', '.join(repr(g.display_name) for g in groups),
                )
            error_msg += _(
                "\nUser: %(user)s"
                "\nGroups: %(allowed_groups_msg)s",
                user=self.env.uid,
                allowed_groups_msg=allowed_groups_msg,
            )

        raise AccessError(error_msg)
3431 @api.model
3432 @api.deprecated(
3433 "Deprecated since 19.0, use `_check_field_access` on models."
3434 " To get the list of allowed fields, use `fields_get`.",
3435 )
3436 def check_field_access_rights(self, operation: str, field_names: list[str] | None) -> list[str]:
3437 """Check the user access rights on the given fields.
3439 If `field_names` is not provided, we list accessible fields to the user.
3440 Otherwise, an error is raised if we try to access a forbidden field.
3441 Note that this function ignores unknown (virtual) fields.
3443 :param operation: one of ``create``, ``read``, ``write``, ``unlink``
3444 :param field_names: names of the fields
3445 :return: provided fields if fields is truthy (or the fields
3446 readable by the current user).
3447 :raise AccessError: if the user is not allowed to access
3448 the provided fields.
3449 """
3450 if self.env.su:
3451 return field_names or list(self._fields)
3453 if not field_names:
3454 return [
3455 field_name
3456 for field_name, field in self._fields.items()
3457 if self._has_field_access(field, operation)
3458 ]
3460 for field_name in field_names:
3461 # Unknown (or virtual) fields are considered accessible because they will not be read and nothing will be written to them.
3462 field = self._fields.get(field_name)
3463 if field is None:
3464 continue
3465 self._check_field_access(field, operation)
3466 return field_names
3468 @api.readonly
3469 def read(self, fields: Sequence[str] | None = None, load: str = '_classic_read') -> list[ValuesType]:
3470 """Read the requested fields for the records in ``self``, and return their
3471 values as a list of dicts.
3473 :param fields: field names to return (default is all fields)
3474 :param load: loading mode, currently the only option is to set to
3475 ``None`` to avoid loading the `display_name` of m2o fields
3476 :return: a list of dictionaries mapping field names to their values,
3477 with one dictionary per record
3478 :raise AccessError: if user is not allowed to access requested information
3479 :raise ValueError: if a requested field does not exist
3481 This is a high-level method that is not supposed to be overridden. In
3482 order to modify how fields are read from database, see methods
3483 :meth:`_fetch_query` and :meth:`_read_format`.
3484 """
3485 if not fields: 3485 ↛ 3486line 3485 didn't jump to line 3486 because the condition on line 3485 was never true
3486 fields = list(self.fields_get(attributes=()))
3487 elif not self and not self.env.su: # check field access, otherwise done during fetch() 3487 ↛ 3488line 3487 didn't jump to line 3488 because the condition on line 3487 was never true
3488 self._determine_fields_to_fetch(fields)
3489 self._origin.fetch(fields)
3490 return self._read_format(fnames=fields, load=load)
3492 def update_field_translations(
3493 self,
3494 field_name: str,
3495 translations: dict[str, str | typing.Literal[False] | dict[str, str]],
3496 source_lang: str = '',
3497 ) -> bool:
3498 """ Update the translations for a given field
3500 See 'self._update_field_translations' docstring for details.
3501 """
3502 return self._update_field_translations(field_name, translations, source_lang=source_lang)
    def _update_field_translations(
        self,
        field_name: str,
        translations: dict[str, str | typing.Literal[False] | dict[str, str]],
        digest: Callable[[str], str] | None = None,
        source_lang: str = '',
    ) -> bool:
        """ Update the translations for a given field, with support for handling
        old terms using an optional digest function.

        :param field_name: The name of the field to update.
        :param translations: The translations to apply.
            If ``field.translate`` is ``True``, the dictionary should be in the
            format::

                {lang: new_value}

            where ``new_value`` can either be:

            * a ``str``, in which case the new translation for the specified
              language.
            * ``False``, in which case it removes the translation for the
              specified language and falls back to the latest en_US value.

            If ``field.translate`` is a callable, the dictionary should be in
            the format::

                {lang: {old_source_lang_term: new_term}}

            or (when ``digest`` is callable)::

                {lang: {digest(old_source_lang_term): new_term}}.

            where ``new_term`` can either be:

            * a non-empty ``str``, in which case the new translation of
              ``old_term`` for the specified language.
            * ``False`` or ``''``, in which case it removes the translation for
              the specified language and falls back to the old
              ``source_lang_term``.

        :param digest: An optional function to generate identifiers for old terms.
        :param source_lang: The language of ``old_source_lang_term`` in
            translations. Assumes ``'en_US'`` when it is not set / empty.
        :return: ``True`` when translations were applied, ``False`` when the
            field cannot be translated or there was nothing to update
        :raise UserError: when a language is not activated or the values are
            not of the expected type
        """
        self.ensure_one()

        self.check_access('write')
        field = self._fields[field_name]
        self._check_field_access(field, 'write')

        valid_langs = set(code for code, _ in self.env['res.lang'].get_installed()) | {'en_US'}
        source_lang = source_lang or 'en_US'
        missing_langs = (set(translations) | {source_lang}) - valid_langs
        if missing_langs:
            raise UserError(
                _("The following languages are not activated: %(missing_names)s",
                missing_names=', '.join(missing_langs))
            )

        if not field.translate:
            return False  # or raise error

        if not field.store and not field.related and field.compute:
            # a non-related non-stored computed field cannot be translated, even if it has inverse function
            return False

        # Strictly speaking, a translated related/computed field cannot be stored
        # because the compute function only support one language
        # `not field.store` is a redundant logic.
        # But some developers store translated related fields.
        # In these cases, only all translations of the first stored translation field will be updated
        # For other stored related translated field, the translation for the flush language will be updated
        if field.related and not field.store:
            # delegate to the field actually holding the value
            related_path, field_name = field.related.rsplit(".", 1)
            return self.mapped(related_path)._update_field_translations(field_name, translations, digest)

        if field.translate is True:
            # whole-value translation: the column is a JSONB {lang: value} map.
            # falsy values (except empty str) are used to void the corresponding translation
            if any(translation and not isinstance(translation, str) for translation in translations.values()):
                raise UserError(_("Translations for model translated fields only accept falsy values and str"))
            value_en = translations.get('en_US', True)
            if not value_en and value_en != '':
                # en_US cannot be voided: it is the fallback value
                translations.pop('en_US')
            translations = {
                lang: translation if isinstance(translation, str) else None
                for lang, translation in translations.items()
            }
            if not translations:
                return False

            # pick the best available value to (re)seed the en_US fallback
            translation_fallback = translations['en_US'] if translations.get('en_US') is not None \
                else translations[self.env.lang] if translations.get(self.env.lang) is not None \
                else next((v for v in translations.values() if v is not None), None)
            self.invalidate_recordset([field_name])
            # merge order: fallback < existing column value < new translations;
            # None values stripped afterwards void their language
            self.env.cr.execute(SQL(
                """ UPDATE %(table)s
                    SET %(field)s = NULLIF(
                        jsonb_strip_nulls(%(fallback)s || COALESCE(%(field)s, '{}'::jsonb) || %(value)s),
                        '{}'::jsonb)
                    WHERE id = %(id)s
                """,
                table=SQL.identifier(self._table),
                field=SQL.identifier(field_name),
                fallback=Json({'en_US': translation_fallback}),
                value=Json(translations),
                id=self.id,
            ))
            self.modified([field_name])
        else:
            # term-level translation (field.translate is a callable)
            old_values = field._get_stored_translations(self)
            if not old_values:
                return False

            for lang in translations:
                # for languages to be updated, use the unconfirmed translated value to replace the language value
                if f'_{lang}' in old_values:
                    old_values[lang] = old_values.pop(f'_{lang}')
            translations = {lang: _translations for lang, _translations in translations.items() if _translations}

            # source value: prefer the unconfirmed source, then the confirmed
            # one, then fall back to en_US
            old_source_lang_value = old_values[next(
                lang
                for lang in [f'_{source_lang}', source_lang, '_en_US', 'en_US']
                if lang in old_values)]
            old_values_to_translate = {
                lang: value
                for lang, value in old_values.items()
                if lang != source_lang and lang in translations
            }
            old_translation_dictionary = field.get_translation_dictionary(old_source_lang_value, old_values_to_translate)

            if digest:
                # replace digested old_en_term with real old_en_term
                digested2term = {
                    digest(old_en_term): old_en_term
                    for old_en_term in old_translation_dictionary
                }
                translations = {
                    lang: {
                        digested2term[src]: value
                        for src, value in lang_translations.items()
                        if src in digested2term
                    }
                    for lang, lang_translations in translations.items()
                }

            new_values = old_values
            for lang, _translations in translations.items():
                _old_translations = {src: values[lang] for src, values in old_translation_dictionary.items() if lang in values}
                _new_translations = {**_old_translations, **_translations}
                # re-render the full value with the new terms substituted in
                new_values[lang] = field.convert_to_cache(field.translate(_new_translations.get, old_source_lang_value), self)
            field._update_cache(self.with_context(prefetch_langs=True), new_values, dirty=True)

        # the following write is in charge of
        # 1. mark field as modified
        # 2. execute logics in the override `write` method
        # even if the value in cache is the same as the value written
        self[field_name] = self[field_name]
        return True
3664 def get_field_translations(self, field_name: str, langs: Collection[str] | None = None) -> tuple[list[dict[str, str]], dict[str, typing.Any]]:
3665 """ Get model/model_term translations for records.
3667 :param field_name: field name
3668 :param langs: languages
3670 :return: (translations, context) where
3671 translations: list of dicts like [{"lang": lang, "source": source_term, "value": value_term}]
3672 context: {"translation_type": "text"/"char", "translation_show_source": True/False}
3673 """
3674 self.ensure_one()
3675 field = self._fields[field_name]
3676 # We don't forbid reading inactive/non-existing languages,
3677 langs = set(langs or [l[0] for l in self.env['res.lang'].get_installed()])
3678 self_lang = self.with_context(check_translations=True, prefetch_langs=True)
3679 val_en = self_lang.with_context(lang='en_US')[field_name]
3680 if not field.translate:
3681 translations = []
3682 elif field.translate is True:
3683 translations = [{
3684 'lang': lang,
3685 'source': val_en,
3686 'value': self_lang.with_context(lang=lang)[field_name]
3687 } for lang in langs]
3688 else:
3689 translation_dictionary = field.get_translation_dictionary(
3690 val_en, {lang: self_lang.with_context(lang=lang)[field_name] for lang in langs}
3691 )
3692 translations = [{
3693 'lang': lang,
3694 'source': term_en,
3695 'value': term_lang if term_lang != term_en else ''
3696 } for term_en, translations in translation_dictionary.items()
3697 for lang, term_lang in translations.items()]
3698 context = {}
3699 context['translation_type'] = 'text' if field.type in ['text', 'html'] else 'char'
3700 context['translation_show_source'] = callable(field.translate)
3702 return translations, context
3704 def _get_base_lang(self) -> str:
3705 """ Return the base language of the record. """
3706 self.ensure_one()
3707 return 'en_US'
3709 def _read_format(self, fnames: Sequence[str], load: str = '_classic_read') -> list[ValuesType]:
3710 """Return a list of dictionaries mapping field names to their values,
3711 with one dictionary per record that exists.
3713 The output format is the one expected from the `read` method, which uses
3714 this method as its implementation for formatting values.
3716 For the properties fields, call convert_to_read_multi instead of convert_to_read
3717 to prepare everything (record existences, display name, etc) in batch.
3719 The current method is different from `read` because it retrieves its
3720 values from the cache without doing a query when it is avoidable.
3721 """
3722 data = [(record, {'id': record.id}) for record in self]
3723 use_display_name = (load == '_classic_read')
3724 for name in fnames:
3725 field = self._fields[name]
3726 if field.type == 'properties': 3726 ↛ 3727line 3726 didn't jump to line 3727 because the condition on line 3726 was never true
3727 values_list = []
3728 records = []
3729 for record, vals in data:
3730 try:
3731 values_list.append(record[name])
3732 records.append(record.id)
3733 except MissingError:
3734 vals.clear()
3736 results = field.convert_to_read_multi(values_list, self.browse(records))
3737 for record_read_vals, convert_result in zip(data, results):
3738 record_read_vals[1][name] = convert_result
3739 continue
3741 convert = field.convert_to_read
3742 for record, vals in data:
3743 # missing records have their vals empty
3744 if not vals: 3744 ↛ 3745line 3744 didn't jump to line 3745 because the condition on line 3744 was never true
3745 continue
3746 try:
3747 vals[name] = convert(record[name], record, use_display_name)
3748 except MissingError:
3749 vals.clear()
3750 result = [vals for record, vals in data if vals]
3752 return result
3754 def _fetch_field(self, field: Field) -> None:
3755 """ Read from the database in order to fetch ``field`` (:class:`Field`
3756 instance) for ``self`` in cache.
3757 """
3758 # determine which fields can be prefetched
3759 if self.env.context.get('prefetch_fields', True) and field.prefetch:
3760 fnames = [
3761 name
3762 for name, f in self._fields.items()
3763 # select fields with the same prefetch group
3764 if f.prefetch == field.prefetch
3765 # discard fields with groups that the user may not access
3766 if self._has_field_access(f, 'read')
3767 ]
3768 if field.name not in fnames: 3768 ↛ 3769line 3768 didn't jump to line 3769 because the condition on line 3768 was never true
3769 fnames.append(field.name)
3770 else:
3771 fnames = [field.name]
3772 self.fetch(fnames)
    @api.private
    def fetch(self, field_names: Collection[str] | None = None) -> None:
        """ Make sure the given fields are in memory for the records in ``self``,
        by fetching what is necessary from the database. Non-stored fields are
        mostly ignored, except for their stored dependencies. This method should
        be called to optimize code.

        :param field_names: a collection of field names to fetch, or ``None`` for
            all accessible fields marked with ``prefetch=True``
        :raise AccessError: if user is not allowed to access requested information

        This method is implemented thanks to methods :meth:`_search` and
        :meth:`_fetch_query`, and should not be overridden.
        """
        self = self._origin  # noqa: PLW0642 filtered out new records
        # nothing to do on an empty recordset or an explicitly empty field list
        if not self or not (field_names is None or field_names):
            return

        fields_to_fetch = self._determine_fields_to_fetch(field_names, ignore_when_in_cache=True)

        # first determine a query that satisfies the domain and access rules
        if any(field.column_type for field in fields_to_fetch):
            # column fields need a real query; _search also applies ir.rules
            query = self._search([('id', 'in', self.ids)], active_test=False)
        else:
            try:
                self.check_access('read')
            except MissingError:
                # Method fetch() should never raise a MissingError, but method
                # check_access() can, because it must read fields on self.
                # So we restrict 'self' to existing records (to avoid an extra
                # exists() at the end of the method).
                self = self.exists()
                self.check_access('read')
            if not fields_to_fetch:
                # everything is already in cache; only the access check was needed
                return
            query = self._as_query(ordered=False)

        # fetch the fields
        fetched = self._fetch_query(query, fields_to_fetch)

        # possibly raise exception for the records that could not be read
        if fetched != self:
            forbidden = (self - fetched).exists()
            if forbidden:
                raise self.env['ir.rule']._make_access_error('read', forbidden)
    def _determine_fields_to_fetch(
        self,
        field_names: Collection[str] | None = None,
        ignore_when_in_cache: bool = False,
    ) -> list[Field]:
        """
        Return the fields to fetch from database among the given field names,
        and following the dependencies of computed fields. The method is used
        by :meth:`fetch` and :meth:`search_fetch`.

        :param field_names: the collection of requested fields, or ``None`` for
            all accessible fields marked with ``prefetch=True``
        :param ignore_when_in_cache: whether to ignore fields that are already in cache for ``self``
        :return: the list of fields that must be fetched
        :raise AccessError: when trying to fetch fields to which the user does not have access
        """
        if field_names is None:
            # default: every accessible prefetchable field
            return [
                field
                for field in self._fields.values()
                if field.prefetch is True and self._has_field_access(field, 'read')
            ]

        if not field_names:
            return []

        # breadth-first traversal over field dependencies: requested fields are
        # seeded into the worklist; non-stored fields are replaced by their
        # stored dependencies
        fields_to_fetch: list[Field] = []
        fields_todo = deque[Field]()
        fields_done = {self._fields['id']}  # trick: ignore 'id'
        for field_name in field_names:
            try:
                field = self._fields[field_name]
            except KeyError as e:
                raise ValueError(f"Invalid field {field_name!r} on {self._name!r}") from e
            # explicitly requested fields raise on missing access
            self._check_field_access(field, 'read')
            fields_todo.append(field)

        while fields_todo:
            field = fields_todo.popleft()
            if field in fields_done:
                continue
            fields_done.add(field)
            if ignore_when_in_cache and not any(field._cache_missing_ids(self)):
                # field is already in cache: don't fetch it
                continue
            if field.store:
                fields_to_fetch.append(field)
            else:
                # optimization: fetch field dependencies
                for dotname in self.pool.field_depends[field]:
                    dep_field = self._fields[dotname.split('.', 1)[0]]
                    # dependencies are fetched opportunistically: only when
                    # non-stored (recurse) or prefetchable and accessible
                    if (not dep_field.store) or (
                        dep_field.prefetch is True
                        and self._has_field_access(dep_field, 'read')
                    ):
                        fields_todo.append(dep_field)

        return fields_to_fetch
3879 def _fetch_query(self, query: Query, fields: Sequence[Field]) -> Self:
3880 """ Fetch the given fields (iterable of :class:`Field` instances) from
3881 the given query, put them in cache, and return the fetched records.
3883 This method may be overridden to change what fields to actually fetch,
3884 or to change the values that are put in cache.
3885 """
3887 # determine columns fields and those with their own read() method
3888 column_fields: OrderedSet[Field] = OrderedSet()
3889 other_fields: OrderedSet[Field] = OrderedSet()
3890 for field in fields:
3891 if field.name == 'id': 3891 ↛ 3892line 3891 didn't jump to line 3892 because the condition on line 3891 was never true
3892 continue
3893 assert field.store
3894 (column_fields if field.column_type else other_fields).add(field)
3896 context = self.env.context
3898 if column_fields:
3899 # the query may involve several tables: we need fully-qualified names
3900 sql_terms = [SQL.identifier(self._table, 'id')]
3901 for field in column_fields:
3902 if field.type == 'binary' and ( 3902 ↛ 3905line 3902 didn't jump to line 3905 because the condition on line 3902 was never true
3903 context.get('bin_size') or context.get('bin_size_' + field.name)):
3904 # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
3905 sql = self._field_to_sql(self._table, field.name, query)
3906 sql = SQL("pg_size_pretty(length(%s)::bigint)", sql)
3907 else:
3908 sql = self._field_to_sql(self._table, field.name, query)
3909 # flushing is necessary to retrieve the en_US value of fields without a translation
3910 # otherwise, re-create the SQL without flushing
3911 if not field.translate:
3912 to_flush = (f for f in sql.to_flush if f != field)
3913 sql = SQL(sql.code, *sql.params, to_flush=to_flush)
3914 sql_terms.append(sql)
3916 # select the given columns from the rows in the query
3917 rows = self.env.execute_query(query.select(*sql_terms))
3919 if not rows:
3920 return self.browse()
3922 # rows = [(id1, a1, b1), (id2, a2, b2), ...]
3923 # column_values = [(id1, id2, ...), (a1, a2, ...), (b1, b2, ...)]
3924 column_values = zip(*rows)
3925 ids = next(column_values)
3926 fetched = self.browse(ids)
3928 # If we assume that the value of a pending update is in cache, we
3929 # can avoid flushing pending updates if the fetched values do not
3930 # overwrite values in cache.
3931 for field, values in zip(column_fields, column_values, strict=True):
3932 # store values in cache, but without overwriting
3933 field._insert_cache(fetched, values)
3934 else:
3935 fetched = self.browse(query)
3937 # process non-column fields
3938 if fetched:
3939 for field in other_fields:
3940 field.read(fetched)
3942 return fetched
3944 def get_metadata(self) -> list[ValuesType]:
3945 """Return some metadata about the given records.
3947 :returns: list of ownership dictionaries for each requested record with the following keys:
3949 * id: object id
3950 * create_uid: user who created the record
3951 * create_date: date when the record was created
3952 * write_uid: last user who changed the record
3953 * write_date: date of the last change to the record
3954 * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
3955 * xmlids: list of dict with xmlid in format ``module.name``, and noupdate as boolean
3956 * noupdate: A boolean telling if the record will be updated or not
3957 """
3959 IrModelData = self.env['ir.model.data'].sudo()
3960 if self._log_access:
3961 res = self.read(LOG_ACCESS_COLUMNS)
3962 else:
3963 res = [{'id': x} for x in self.ids]
3966 xml_data = defaultdict(list)
3967 imds = IrModelData.search_read(
3968 [('model', '=', self._name), ('res_id', 'in', self.ids)],
3969 ['res_id', 'noupdate', 'module', 'name'],
3970 order='id DESC'
3971 )
3972 for imd in imds:
3973 xml_data[imd['res_id']].append({
3974 'xmlid': "%s.%s" % (imd['module'], imd['name']),
3975 'noupdate': imd['noupdate'],
3976 })
3978 for r in res:
3979 main = xml_data.get(r['id'], [{}])[-1]
3980 r['xmlid'] = main.get('xmlid', False)
3981 r['noupdate'] = main.get('noupdate', False)
3982 r['xmlids'] = xml_data.get(r['id'], [])[::-1]
3983 return res
3985 def get_base_url(self) -> str:
3986 """ Return rooturl for a specific record.
3988 By default, it returns the ir.config.parameter of base_url
3989 but it can be overridden by model.
3991 :return: the base url for this record
3992 """
3993 if len(self) > 1: 3993 ↛ 3994line 3993 didn't jump to line 3994 because the condition on line 3993 was never true
3994 raise ValueError("Expected singleton or no record: %s" % self)
3995 return self.env['ir.config_parameter'].sudo().get_param('web.base.url')
3997 def _check_company_domain(self, companies) -> Domain:
3998 """Domain to be used for company consistency between records regarding this model.
4000 :param companies: the allowed companies for the related record
4001 :type companies: BaseModel or list or tuple or int or unquote
4002 """
4003 if not companies:
4004 return Domain('company_id', '=', False)
4005 if isinstance(companies, unquote):
4006 return Domain('company_id', 'in', unquote(f'{companies} + [False]'))
4007 return Domain('company_id', 'in', to_record_ids(companies) + [False])
    def _check_company(self, fnames=None):
        """ Check the companies of the values of the given field names.

        :param list fnames: names of relational fields to check
        :raises UserError: if the `company_id` of the value of any field is not
            in `[False, self.company_id]` (or `self` if
            :class:`~odoo.addons.base.models.res_company`).

        For :class:`~odoo.addons.base.models.res_users` relational fields,
        verifies record company is in `company_ids` fields.

        User with main company A, having access to company A and B, could be
        assigned or linked to records in company B.
        """
        # writing on the company field(s) may invalidate any relational value:
        # in that case, re-check every field of the model
        if fnames is None or {'company_id', 'company_ids'} & set(fnames):
            fnames = self._fields

        # partition fields to check: company-dependent ("property") fields are
        # validated against self.env.company rather than the record's company
        regular_fields = []
        property_fields = []
        for name in fnames:
            field = self._fields[name]
            if field.relational and field.check_company:
                if not field.company_dependent:
                    regular_fields.append(name)
                else:
                    property_fields.append(name)

        if not (regular_fields or property_fields):
            return

        inconsistencies = []  # [(record, field_name, offending_corecords)]
        for record in self:
            # The first part of the check verifies that all records linked via relation fields are compatible
            # with the company of the origin document, i.e. `self.account_id.company_id == self.company_id`
            if regular_fields:
                if self._name == 'res.company':
                    companies = record
                elif 'company_id' in self:
                    companies = record.company_id
                elif 'company_ids' in self:
                    companies = record.company_ids
                else:
                    _logger.warning(_(
                        "Skipping a company check for model %(model_name)s. Its fields %(field_names)s are set as company-dependent, "
                        "but the model doesn't have a `company_id` or `company_ids` field!",
                        model_name=self._name, field_names=regular_fields
                    ))
                    continue
                for name in regular_fields:
                    # sudo: avoid spurious access errors while inspecting values
                    corecords = record.sudo()[name]
                    if corecords:
                        domain = corecords._check_company_domain(companies)
                        if domain and corecords != corecords.with_context(active_test=False).filtered_domain(domain):
                            inconsistencies.append((record, name, corecords))
            # The second part of the check (for property / company-dependent fields) verifies that the records
            # linked via those relation fields are compatible with the company that owns the property value, i.e.
            # the company for which the value is being assigned, i.e:
            # `self.property_account_payable_id.company_id == self.env.company
            company = self.env.company
            for name in property_fields:
                corecords = record.sudo()[name]
                if corecords:
                    domain = corecords._check_company_domain(company)
                    if domain and corecords != corecords.with_context(active_test=False).filtered_domain(domain):
                        inconsistencies.append((record, name, corecords))

        if inconsistencies:
            # build a readable error message for (at most) the first 5 issues
            lines = [_("Uh-oh! You’ve got some company inconsistencies here:")]
            company_msg = _lt("- Record is company “%(company)s” while “%(field)s” (%(fname)s: %(values)s) belongs to another company.")
            record_msg = _lt("- “%(record)s” belongs to company “%(company)s” while “%(field)s” (%(fname)s: %(values)s) belongs to another company.")
            root_company_msg = _lt("- Only a root company can be set on “%(record)s”. Currently set to “%(company)s”")
            for record, name, corecords in inconsistencies[:5]:
                if record._name == 'res.company':
                    msg, companies = company_msg, record
                elif record == corecords and name == 'company_id':
                    msg, companies = root_company_msg, record.company_id
                else:
                    msg = record_msg
                    companies = record.company_id if 'company_id' in record else record.company_ids
                field = self.env['ir.model.fields']._get(self._name, name)
                # lazy translations (_lt) are resolved with str() before formatting
                lines.append(str(msg) % {
                    'record': record.display_name,
                    'company': ", ".join(company.display_name for company in companies),
                    'field': field.field_description,
                    'fname': field.name,
                    'values': ", ".join(repr(rec.display_name) for rec in corecords),
                })
            lines.append(_("To avoid a mess, no company crossover is allowed!"))
            raise UserError("\n".join(lines))
4099 @api.private # use has_access
4100 def check_access(self, operation: str) -> None:
4101 """ Verify that the current user is allowed to perform ``operation`` on
4102 all the records in ``self``. The method raises an :class:`AccessError`
4103 if the operation is forbidden on the model in general, or on any record
4104 in ``self``.
4106 In particular, when ``self`` is empty, the method checks whether the
4107 current user has some permission to perform ``operation`` on the model
4108 in general::
4110 # check that user has some minimal permission on the model
4111 records.browse().check_access(operation)
4113 """
4114 if not self.env.su and (result := self._check_access(operation)): 4114 ↛ 4115line 4114 didn't jump to line 4115 because the condition on line 4114 was never true
4115 raise result[1]()
4117 def has_access(self, operation: str) -> bool:
4118 """ Return whether the current user is allowed to perform ``operation``
4119 on all the records in ``self``. The method is fully consistent with
4120 method :meth:`check_access` but returns a boolean instead.
4121 """
4122 return self.env.su or not self._check_access(operation)
4124 def _filtered_access(self, operation: str):
4125 """ Return the subset of ``self`` for which the current user is allowed
4126 to perform ``operation``. The method is fully equivalent to::
4128 self.filtered(lambda record: record.has_access(operation))
4130 """
4131 if self and not self.env.su and (result := self._check_access(operation)): 4131 ↛ 4132line 4131 didn't jump to line 4132 because the condition on line 4131 was never true
4132 return self - result[0]
4133 return self
4135 def _check_access(self, operation: str) -> tuple[Self, Callable] | None:
4136 """ Return ``None`` if the current user has permission to perform
4137 ``operation`` on the records ``self``. Otherwise, return a pair
4138 ``(records, function)`` where ``records`` are the forbidden records, and
4139 ``function`` can be used to create some corresponding exception.
4141 This method provides the base implementation of
4142 methods :meth:`check_access`, :meth:`has_access`
4143 and :meth:`_filtered_access`. The method may be overridden in order to
4144 restrict the access to ``self``.
4145 """
4146 Access = self.env['ir.model.access']
4147 if not Access.check(self._name, operation, raise_exception=False): 4147 ↛ 4148line 4147 didn't jump to line 4148 because the condition on line 4147 was never true
4148 return self, functools.partial(Access._make_access_error, self._name, operation)
4150 # we only check access rules on real records, which should not be mixed
4151 # with new records
4152 if any(self._ids):
4153 Rule = self.env['ir.rule']
4154 domain = Rule._compute_domain(self._name, operation)
4155 if domain and (forbidden := self - self.sudo().with_context(active_test=False).filtered_domain(domain)): 4155 ↛ 4156line 4155 didn't jump to line 4156 because the condition on line 4155 was never true
4156 return forbidden, functools.partial(Rule._make_access_error, operation, forbidden)
4158 return None
4160 @api.model
4161 @api.deprecated("check_access_rights() is deprecated since 18.0; use check_access() instead.")
4162 def check_access_rights(self, operation, raise_exception=True):
4163 """ Verify that the given operation is allowed for the current user accord to ir.model.access.
4165 :param str operation: one of ``create``, ``read``, ``write``, ``unlink``
4166 :param bool raise_exception: whether an exception should be raise if operation is forbidden
4167 :return: whether the operation is allowed
4168 :rtype: bool
4169 :raise AccessError: if the operation is forbidden and raise_exception is True
4170 """
4171 if raise_exception:
4172 return self.browse().check_access(operation)
4173 return self.browse().has_access(operation)
4175 @api.deprecated("check_access_rule() is deprecated since 18.0; use check_access() instead.")
4176 def check_access_rule(self, operation):
4177 """ Verify that the given operation is allowed for the current user according to ir.rules.
4179 :param str operation: one of ``create``, ``read``, ``write``, ``unlink``
4180 :return: None if the operation is allowed
4181 :raise UserError: if current ``ir.rules`` do not permit this operation.
4182 """
4183 self.check_access(operation)
4185 @api.deprecated("_filter_access_rules() is deprecated since 18.0; use _filtered_access() instead.")
4186 def _filter_access_rules(self, operation):
4187 """ Return the subset of ``self`` for which ``operation`` is allowed. """
4188 return self._filtered_access(operation)
4190 @api.deprecated("_filter_access_rules_python() is deprecated since 18.0; use _filtered_access() instead.")
4191 def _filter_access_rules_python(self, operation):
4192 return self._filtered_access(operation)
    def unlink(self) -> typing.Literal[True]:
        """ Delete the records in ``self``.

        :raise AccessError: if the user is not allowed to delete all the given records
        :raise UserError: if the record is default property for other records
        """
        if not self:
            return True

        self.check_access('unlink')

        # run the registered @api.ondelete hooks before actually deleting
        from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
        for func in self._ondelete_methods:
            # func._ondelete is True if it should be called during uninstallation
            if func._ondelete or not self.env.context.get(MODULE_UNINSTALL_FLAG):
                func(self)

        # TOFIX: this avoids an infinite loop when trying to recompute a
        # field, which triggers the recomputation of another field using the
        # same compute function, which then triggers again the computation
        # of those two fields
        for field in self._fields.values():
            self.env.remove_to_compute(field, self)

        # make sure the database is up to date before issuing raw DELETEs
        self.env.flush_all()

        cr = self.env.cr
        Data = self.env['ir.model.data'].sudo().with_context({})
        Defaults = self.env['ir.default'].sudo()
        Attachment = self.env['ir.attachment'].sudo()
        # accumulators for records to clean up after all deletions are done
        ir_model_data_unlink = Data
        ir_attachment_unlink = Attachment

        # mark fields that depend on 'self' to recompute them after 'self' has
        # been deleted (like updating a sum of lines after deleting one line)
        with self.env.protecting(self._fields.values(), self):
            self.modified(self._fields, before=True)

        # delete in batches of at most cr.IN_MAX ids per SQL statement
        for sub_ids in split_every(cr.IN_MAX, self.ids):
            records = self.browse(sub_ids)

            cr.execute(SQL(
                "DELETE FROM %s WHERE id IN %s",
                SQL.identifier(self._table), sub_ids,
            ))

            # Removing the ir_model_data reference if the record being deleted
            # is a record created by xml/csv file, as these are not connected
            # with real database foreign keys, and would be dangling references.
            #
            # Note: the following steps are performed as superuser to avoid
            # access rights restrictions, and with no context to avoid possible
            # side-effects during admin calls.
            data = Data.search([('model', '=', self._name), ('res_id', 'in', sub_ids)])
            ir_model_data_unlink |= data

            # For the same reason, remove the relevant records in ir_attachment
            # (the search is performed with sql as the search method of
            # ir_attachment is overridden to hide attachments of deleted
            # records)
            cr.execute(SQL(
                "SELECT id FROM ir_attachment WHERE res_model=%s AND res_id IN %s",
                self._name, sub_ids,
            ))
            ir_attachment_unlink |= Attachment.browse(row[0] for row in cr.fetchall())

            # don't allow fallback value in ir.default for many2one company dependent fields to be deleted
            # Exception: when MODULE_UNINSTALL_FLAG, these fallbacks can be deleted by Defaults.discard_records(records)
            if (many2one_fields := self.env.registry.many2one_company_dependents[self._name]) and not self.env.context.get(MODULE_UNINSTALL_FLAG):
                IrModelFields = self.env["ir.model.fields"]
                field_ids = tuple(IrModelFields._get_ids(field.model_name).get(field.name) for field in many2one_fields)
                # ir.default stores values as JSON text, so compare serialized ids
                sub_ids_json_text = tuple(json.dumps(id_) for id_ in sub_ids)
                if default := Defaults.search([('field_id', 'in', field_ids), ('json_value', 'in', sub_ids_json_text)], limit=1, order='id desc'):
                    ir_field = default.field_id.sudo()
                    field = self.env[ir_field.model]._fields[ir_field.name]
                    record = self.browse(json.loads(default.json_value))
                    raise UserError(_('Unable to delete %(record)s because it is used as the default value of %(field)s', record=record, field=field))

            # on delete set null/restrict for jsonb company dependent many2one
            for field in many2one_fields:
                model = self.env[field.model_name]
                if field.ondelete == 'restrict' and not self.env.context.get(MODULE_UNINSTALL_FLAG):
                    # look for any row whose company-dependent jsonb value still
                    # references one of the deleted ids
                    if res := self.env.execute_query(SQL(
                        """
                        SELECT id, %(field)s
                        FROM %(table)s
                        WHERE %(field)s IS NOT NULL
                        AND %(field)s @? %(jsonpath)s
                        ORDER BY id
                        LIMIT 1
                        """,
                        table=SQL.identifier(model._table),
                        field=SQL.identifier(field.name),
                        jsonpath=f"$.* ? ({' || '.join(f'@ == {id_}' for id_ in sub_ids)})",
                    )):
                        on_restrict_id, field_json = res[0]
                        to_delete_id = next(iter(id_ for id_ in field_json.values()))
                        on_restrict_record = model.browse(on_restrict_id)
                        to_delete_record = self.browse(to_delete_id)
                        raise UserError(_('You cannot delete %(to_delete_record)s, as it is used by %(on_restrict_record)s',
                                          to_delete_record=to_delete_record, on_restrict_record=on_restrict_record))
                else:
                    # ondelete='set null': blank out the deleted ids inside the
                    # per-company jsonb mapping, keeping the other entries
                    self.env.execute_query(SQL(
                        """
                        UPDATE %(table)s
                        SET %(field)s = (
                            SELECT jsonb_object_agg(
                                key,
                                CASE
                                    WHEN value::int4 in %(ids)s THEN NULL
                                    ELSE value::int4
                                END)
                            FROM jsonb_each_text(%(field)s)
                        )
                        WHERE %(field)s IS NOT NULL
                        AND %(field)s @? %(jsonpath)s
                        """,
                        table=SQL.identifier(model._table),
                        field=SQL.identifier(field.name),
                        ids=sub_ids,
                        jsonpath=f"$.* ? ({' || '.join(f'@ == {id_}' for id_ in sub_ids)})",
                    ))

            # For the same reason, remove the defaults having some of the
            # records as value
            Defaults.discard_records(records)

        # invalidate the *whole* cache, since the orm does not handle all
        # changes made in the database, like cascading delete!
        self.env.invalidate_all(flush=False)
        if ir_model_data_unlink:
            ir_model_data_unlink.unlink()
        if ir_attachment_unlink:
            ir_attachment_unlink.unlink()

        # auditing: deletions are infrequent and leave no trace in the database
        _unlink.info('User #%s deleted %s records with IDs: %r', self.env.uid, self._name, self.ids)

        return True
    def write(self, vals: ValuesType) -> typing.Literal[True]:
        """ Update all records in ``self`` with the provided values.

        :param vals: fields to update and the value to set on them
        :raise AccessError: if user is not allowed to modify the specified records/fields
        :raise ValidationError: if invalid values are specified for selection fields
        :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)

        * For numeric fields (:class:`~odoo.fields.Integer`,
          :class:`~odoo.fields.Float`) the value should be of the
          corresponding type
        * For :class:`~odoo.fields.Boolean`, the value should be a
          :class:`python:bool`
        * For :class:`~odoo.fields.Selection`, the value should match the
          selection values (generally :class:`python:str`, sometimes
          :class:`python:int`)
        * For :class:`~odoo.fields.Many2one`, the value should be the
          database identifier of the record to set
        * The expected value of a :class:`~odoo.fields.One2many` or
          :class:`~odoo.fields.Many2many` relational field is a list of
          :class:`~odoo.fields.Command` that manipulate the relation they
          implement. There are a total of 7 commands:
          :meth:`~odoo.fields.Command.create`,
          :meth:`~odoo.fields.Command.update`,
          :meth:`~odoo.fields.Command.delete`,
          :meth:`~odoo.fields.Command.unlink`,
          :meth:`~odoo.fields.Command.link`,
          :meth:`~odoo.fields.Command.clear`, and
          :meth:`~odoo.fields.Command.set`.
        * For :class:`~odoo.fields.Date` and `~odoo.fields.Datetime`,
          the value should be either a date(time), or a string.

          .. warning::

            If a string is provided for Date(time) fields,
            it must be UTC-only and formatted according to
            :const:`odoo.tools.misc.DEFAULT_SERVER_DATE_FORMAT` and
            :const:`odoo.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT`

        * Other non-relational fields use a string for value
        """
        if not self:
            return True

        self.check_access('write')
        for field_name in vals:
            try:
                self._check_field_access(self._fields[field_name], 'write')
            except KeyError as e:
                raise ValueError(f"Invalid field {field_name!r} in {self._name!r}") from e
        env = self.env

        # fields that callers may never write directly
        bad_names = {'id', 'parent_path'}
        if self._log_access:
            # the superuser can set log_access fields while loading registry
            if not (self.env.uid == SUPERUSER_ID and not self.pool.ready):
                bad_names.update(LOG_ACCESS_COLUMNS)

        # set magic fields
        vals = {key: val for key, val in vals.items() if key not in bad_names}
        if self._log_access:
            vals.setdefault('write_uid', self.env.uid)
            vals.setdefault('write_date', self.env.cr.now())

        # classify the assigned fields: inversed fields are grouped by their
        # inverse method so each method is invoked only once below
        field_values = []                       # [(field, value)]
        determine_inverses = defaultdict(list)  # {inverse: fields}
        fnames_modifying_relations = []
        protected = set()
        for fname, value in vals.items():
            field = self._fields.get(fname)
            if not field:
                raise ValueError("Invalid field %r on model %r" % (fname, self._name))
            field_values.append((field, value))
            if field.inverse:
                if field.type in ('one2many', 'many2many'):
                    # The written value is a list of commands that must be
                    # applied on the field's current value. Because the field is
                    # protected while being written, the field's current value
                    # will not be computed and default to an empty recordset. So
                    # make sure the field's value is in cache before writing, in
                    # order to avoid an inconsistent update.
                    self[fname]
                determine_inverses[field.inverse].append(field)
            if self.pool.is_modifying_relations(field):
                fnames_modifying_relations.append(fname)
            if field.inverse or (field.compute and not field.readonly):
                if field.store or field.type not in ('one2many', 'many2many'):
                    # Protect the field from being recomputed while being
                    # inversed. In the case of non-stored x2many fields, the
                    # field's value may contain unexpected new records (created
                    # by command 0). Those new records are necessary for
                    # inversing the field, but should no longer appear if the
                    # field is recomputed afterwards. Not protecting the field
                    # will automatically invalidate the field from the cache,
                    # forcing its value to be recomputed once dependencies are
                    # up-to-date.
                    protected.update(self.pool.field_computed.get(field, [field]))

        # force the computation of fields that are computed with some assigned
        # fields, but are not assigned themselves
        to_compute = [field.name
                      for field in protected
                      if field.compute and field.name not in vals]
        if to_compute:
            self._recompute_recordset(to_compute)

        # protect fields being written against recomputation
        with env.protecting(protected, self):
            # Determine records depending on values. When modifying a relational
            # field, you have to recompute what depends on the field's values
            # before and after modification. This is because the modification
            # has an impact on the "data path" between a computed field and its
            # dependency. Note that this double call to modified() is only
            # necessary for relational fields.
            #
            # It is best explained with a simple example: consider two sales
            # orders SO1 and SO2. The computed total amount on sales orders
            # indirectly depends on the many2one field 'order_id' linking lines
            # to their sales order. Now consider the following code:
            #
            #   line = so1.line_ids[0]      # pick a line from SO1
            #   line.order_id = so2         # move the line to SO2
            #
            # In this situation, the total amount must be recomputed on *both*
            # sales order: the line's order before the modification, and the
            # line's order after the modification.
            self.modified(fnames_modifying_relations, before=True)

            real_recs = self.filtered('id')

            # field.write_sequence determines a priority for writing on fields.
            # Monetary fields need their corresponding currency field in cache
            # for rounding values. X2many fields must be written last, because
            # they flush other fields when deleting lines.
            for field, value in sorted(field_values, key=lambda item: item[0].write_sequence):
                field.write(self, value)

            # determine records depending on new values
            #
            # Call modified after write, because the modified can trigger a
            # search which can trigger a flush which can trigger a recompute
            # which remove the field from the recompute list while all the
            # values required for the computation could not be yet in cache.
            # e.g. Write on `name` of `res.partner` trigger the recompute of
            # `display_name`, which triggers a search on child_ids to find the
            # childs to which the display_name must be recomputed, which
            # triggers the flush of `display_name` because the _order of
            # res.partner includes display_name. The computation of display_name
            # is then done too soon because the parent_id was not yet written.
            # (`test_01_website_reset_password_tour`)
            self.modified(vals)

            if self._parent_store and self._parent_name in vals:
                self.flush_model([self._parent_name])

            # validate non-inversed fields first
            inverse_fields = [f.name for fs in determine_inverses.values() for f in fs]
            real_recs._validate_fields(vals, inverse_fields)

            for fields in determine_inverses.values():
                # write again on non-stored fields that have been invalidated from cache
                for field in fields:
                    if not field.store and (not field.inherited or field.type not in ('one2many', 'many2many')) and any(field._cache_missing_ids(real_recs)):
                        field.write(real_recs, vals[field.name])

                # inverse records that are not being computed
                try:
                    fields[0].determine_inverse(real_recs)
                except AccessError as e:
                    if fields[0].inherited:
                        # make the error message mention the delegating document
                        description = self.env['ir.model']._get(self._name).name
                        raise AccessError(_(
                            "%(previous_message)s\n\nImplicitly accessed through '%(document_kind)s' (%(document_model)s).",
                            previous_message=e.args[0],
                            document_kind=description,
                            document_model=self._name,
                        ))
                    raise

            # validate inversed fields
            real_recs._validate_fields(inverse_fields)

        if self._check_company_auto:
            self._check_company(list(vals))
        return True
4520 def _write(self, vals: ValuesType) -> None:
4521 """ Low-level implementation of write() """
4522 self._write_multi([vals] * len(self))
    def _write_multi(self, vals_list: list[ValuesType]) -> None:
        """ Low-level implementation of write(): update the database columns
        of each record of ``self`` with the corresponding dict of ``vals_list``
        (one dict per record, column fields only), then maintain parent_path.
        """
        assert len(self) == len(vals_list)

        if not self:
            return

        # determine records that require updating parent_path
        parent_records = self._parent_store_update_prepare(vals_list)

        if self._log_access:
            # set magic fields (already done by write(), but not for computed fields)
            log_vals = {'write_uid': self.env.uid, 'write_date': self.env.cr.now()}
            vals_list = [(log_vals | vals) for vals in vals_list]

        # determine SQL updates, grouped by set of updated fields:
        # {(col1, col2, col3): [(id, val1, val2, val3)]}
        updates = defaultdict(list)
        for record, vals in zip(self, vals_list):
            # sort vals.items() by key, then retrieve its keys and values
            fnames, row = zip(*sorted(vals.items()))
            updates[fnames].append(record._ids + row)

        # perform updates (fnames, rows) in batches
        updates_list = [
            (fnames, sub_rows)
            for fnames, rows in updates.items()
            for sub_rows in split_every(UPDATE_BATCH_SIZE, rows)
        ]

        # update columns by group of updated fields
        for fnames, rows in updates_list:
            columns = []
            assignments = []
            for fname in fnames:
                field = self._fields[fname]
                assert field.store and field.column_type
                column = SQL.identifier(fname)
                # the type cast is necessary for some values, like NULLs
                expr = SQL('"__tmp".%s::%s', column, SQL(field.column_type[1]))
                if field.translate is True:
                    # merge the new translations into the existing jsonb value;
                    # this is the SQL equivalent of:
                    # None if expr is None else (
                    #     (column or {'en_US': next(iter(expr.values()))}) | expr
                    # )
                    expr = SQL(
                        """CASE WHEN %(expr)s IS NULL THEN NULL ELSE
                            COALESCE(%(table)s.%(column)s, jsonb_build_object(
                                'en_US', jsonb_path_query_first(%(expr)s, '$.*')
                            )) || %(expr)s
                        END""",
                        table=SQL.identifier(self._table),
                        column=column,
                        expr=expr,
                    )
                if field.company_dependent:
                    # only keep per-company entries that differ from the
                    # fallback value, to avoid storing redundant data
                    fallbacks = self.env['ir.default']._get_field_column_fallbacks(self._name, fname)
                    expr = SQL(
                        """(SELECT jsonb_object_agg(d.key, d.value)
                            FROM jsonb_each(COALESCE(%(table)s.%(column)s, '{}'::jsonb) || %(expr)s) d
                            JOIN jsonb_each(%(fallbacks)s) f
                            ON d.key = f.key AND d.value != f.value)""",
                        table=SQL.identifier(self._table),
                        column=column,
                        expr=expr,
                        fallbacks=fallbacks
                    )
                columns.append(column)
                assignments.append(SQL("%s = %s", column, expr))

            # single UPDATE ... FROM (VALUES ...) statement for the batch
            self.env.execute_query(SQL(
                """ UPDATE %(table)s
                    SET %(assignments)s
                    FROM (VALUES %(values)s) AS "__tmp"("id", %(columns)s)
                    WHERE %(table)s."id" = "__tmp"."id"
                """,
                table=SQL.identifier(self._table),
                assignments=SQL(", ").join(assignments),
                values=SQL(", ").join(rows),
                columns=SQL(", ").join(columns),
            ))

        # update parent_path
        if parent_records:
            parent_records._parent_store_update()
@api.model_create_multi
def create(self, vals_list: list[ValuesType]) -> Self:
    """Create new records for the model.

    The new records are initialized using the values from the list of dicts
    ``vals_list``, and if necessary those from :meth:`~.default_get`.

    :param vals_list:
        values for the model's fields, as a list of dictionaries::

            [{'field_name': field_value, ...}, ...]

        For backward compatibility, ``vals_list`` may be a dictionary.
        It is treated as a singleton list ``[vals]``, and a single record
        is returned.

        see :meth:`~.write` for details

    :return: the created records
    :raise AccessError: if the current user is not allowed to create records of the specified model
    :raise ValidationError: if user tries to enter invalid value for a selection field
    :raise ValueError: if a field name specified in the create values does not exist.
    :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation
        (such as setting an object as its own parent)
    """
    assert isinstance(vals_list, (list, tuple))
    if not vals_list:
        return self.browse()

    self = self.browse()
    self.check_access('create')

    # check access to all user-provided fields
    field_names = OrderedSet(fname for vals in vals_list for fname in vals)
    # 'default_*' context keys also inject field values, so check them too
    field_names.update(
        field_name
        for context_key in self.env.context
        if context_key.startswith('default_')
        and (field_name := context_key[8:])
        and field_name in self._fields
    )
    for field_name in field_names:
        field = self._fields.get(field_name)
        if field is None:
            raise ValueError(f"Invalid field {field_name!r} in {self._name!r}")
        self._check_field_access(field, 'write')

    new_vals_list = self._prepare_create_values(vals_list)

    # classify fields for each record
    data_list = []
    determine_inverses = defaultdict(OrderedSet)  # {inverse: fields}

    for vals in new_vals_list:
        precomputed = vals.pop('__precomputed__', ())

        # distribute fields into sets for various purposes
        data = {}
        data['stored'] = stored = {}
        data['inversed'] = inversed = {}
        data['inherited'] = inherited = defaultdict(dict)
        data['protected'] = protected = set()
        for key, val in vals.items():
            field = self._fields.get(key)
            if not field:
                raise ValueError("Invalid field %r on model %r" % (key, self._name))
            if field.store:
                stored[key] = val
            if field.inherited:
                inherited[field.related_field.model_name][key] = val
            elif field.inverse and field not in precomputed:
                inversed[key] = val
                determine_inverses[field.inverse].add(field)
            # protect editable computed fields and precomputed fields
            # against (re)computation
            if field.compute and (not field.readonly or field.precompute):
                protected.update(self.pool.field_computed.get(field, [field]))

        data_list.append(data)

    # create or update parent records (delegation inheritance, _inherits)
    for model_name, parent_name in self._inherits.items():
        parent_data_list = []
        for data in data_list:
            if not data['stored'].get(parent_name):
                parent_data_list.append(data)
            elif data['inherited'][model_name]:
                parent = self.env[model_name].browse(data['stored'][parent_name])
                parent.write(data['inherited'][model_name])

        if parent_data_list:
            parents = self.env[model_name].create([
                data['inherited'][model_name]
                for data in parent_data_list
            ])
            for parent, data in zip(parents, parent_data_list):
                data['stored'][parent_name] = parent.id

    # create records with stored fields
    records = self._create(data_list)

    # protect fields being written against recomputation
    protected_fields = [(data['protected'], data['record']) for data in data_list]
    with self.env.protecting(protected_fields):
        # call inverse method for each group of fields
        for fields in determine_inverses.values():
            # determine which records to inverse for those fields
            inv_names = {field.name for field in fields}
            inv_rec_ids = []
            for data in data_list:
                if inv_names.isdisjoint(data['inversed']):
                    continue
                record = data['record']
                record._update_cache({
                    fname: value
                    for fname, value in data['inversed'].items()
                    if fname in inv_names and fname not in data['stored']
                })
                inv_rec_ids.append(record.id)

            inv_records = self.browse(inv_rec_ids)
            next(iter(fields)).determine_inverse(inv_records)
            # Values of non-stored fields were cached before running inverse methods. In case of x2many create
            # commands, the cache may therefore hold NewId records. We must now invalidate those values.
            inv_relational_fnames = [field.name for field in fields if field.type in ('one2many', 'many2many') and not field.store]
            inv_records.invalidate_recordset(fnames=inv_relational_fnames)

        # check Python constraints for non-stored inversed fields
        for data in data_list:
            data['record']._validate_fields(data['inversed'], data['stored'])

    if self._check_company_auto:
        records._check_company()

    import_module = self.env.context.get('_import_current_module')
    if not import_module:  # not an import -> bail
        return records

    # It is to support setting xids directly in create by
    # providing an "id" key (otherwise stripped by create) during an import
    # (which should strip 'id' from the input data anyway)
    noupdate = self.env.context.get('noupdate', False)

    xids = (v.get('id') for v in vals_list)
    self.env['ir.model.data']._update_xmlids([
        {
            'xml_id': xid if '.' in xid else ('%s.%s' % (import_module, xid)),
            'record': rec,
            # note: this is not used when updating o2ms above...
            'noupdate': noupdate,
        }
        for rec, xid in zip(records, xids)
        if xid and isinstance(xid, str)
    ])

    return records
def _prepare_create_values(self, vals_list: list[ValuesType]) -> list[ValuesType]:
    """ Clean up and complete the given create values, and return a list of
    new vals containing:

    * default values,
    * discarded forbidden values (magic fields),
    * precomputed fields.

    :param vals_list: List of create values
    :returns: new list of completed create values
    """
    # names that callers are never allowed to set directly
    forbidden = ['id', 'parent_path']
    if self._log_access:
        # the superuser can set log_access fields while loading registry
        registry_loading = self.env.uid == SUPERUSER_ID and not self.pool.ready
        if not registry_loading:
            forbidden += LOG_ACCESS_COLUMNS

    # also discard precomputed readonly fields (to force their computation)
    forbidden += [
        fname
        for fname, field in self._fields.items()
        if field.precompute and field.readonly
    ]

    completed = []
    for vals in vals_list:
        # add default values
        vals = self._add_missing_default_values(vals)

        # strip forbidden names, then fill in the magic audit fields
        for fname in forbidden:
            vals.pop(fname, None)
        if self._log_access:
            vals.setdefault('create_uid', self.env.uid)
            vals.setdefault('create_date', self.env.cr.now())
            vals.setdefault('write_uid', self.env.uid)
            vals.setdefault('write_date', self.env.cr.now())

        completed.append(vals)

    # add precomputed fields
    self._add_precomputed_values(completed)

    return completed
def _add_precomputed_values(self, vals_list: list[ValuesType]) -> None:
    """ Complete ``vals_list`` in place with values for missing
    precompute=True fields, tagging each vals with the set of fields that
    were precomputed (under key ``'__precomputed__'``). """
    to_precompute = {
        name: field
        for name, field in self._fields.items()
        if field.precompute
    }
    if not to_precompute:
        return

    # only consider the vals that actually miss a precomputable field
    incomplete = []
    for vals in vals_list:
        if any(name not in vals for name in to_precompute):
            incomplete.append(vals)
    if not incomplete:
        return

    # instantiate draft (new) records so the compute methods can run on them
    drafts = self.browse().concat(*(self.new(vals) for vals in incomplete))

    for draft, vals in zip(drafts, incomplete):
        computed_fields = set()
        vals['__precomputed__'] = computed_fields
        for name, field in to_precompute.items():
            if name in vals:
                continue
            # computed stored fields with a column have to be computed
            # before create s.t. required and constraints can be applied
            # on those fields
            vals[name] = field.convert_to_write(draft[name], self)
            computed_fields.add(field)
@api.model
def _create(self, data_list: list[ValuesType]) -> Self:
    """ Create records from the stored field values in ``data_list``.

    Each ``data`` dict provides ``stored`` (column values), ``inherited``
    and ``protected``; the method adds ``data['record']`` for each newly
    created record and returns them as one recordset.
    """
    assert data_list
    cr = self.env.cr

    # insert rows in batches of maximum INSERT_BATCH_SIZE
    ids: list[int] = []  # ids of created records
    other_fields: OrderedSet[Field] = OrderedSet()  # non-column fields

    for data_sublist in split_every(INSERT_BATCH_SIZE, data_list):
        stored_list = [data['stored'] for data in data_sublist]
        fnames = sorted({name for stored in stored_list for name in stored})

        columns: list[str] = []
        rows: list[list[typing.Any]] = [[] for _ in stored_list]
        for fname in fnames:
            field = self._fields[fname]
            if field.column_type:
                columns.append(fname)
                for stored, row in zip(stored_list, rows):
                    if fname in stored:
                        row.append(field.convert_to_column_insert(stored[fname], self, stored))
                    else:
                        # record does not set this column: use the SQL DEFAULT
                        row.append(SQL_DEFAULT)
            else:
                other_fields.add(field)

            if field.type == 'properties':
                # force calling fields.create for properties field because
                # we might want to update the parent definition
                other_fields.add(field)

        if not columns:
            # manage the case where we create empty records
            columns = ['id']
            for row in rows:
                row.append(SQL_DEFAULT)

        cr.execute(SQL(
            'INSERT INTO %s (%s) VALUES %s RETURNING "id"',
            SQL.identifier(self._table),
            SQL(', ').join(map(SQL.identifier, columns)),
            SQL(', ').join(tuple(row) for row in rows),
        ))
        ids.extend(id_ for id_, in cr.fetchall())

    # put the new records in cache, and update inverse fields, for many2one
    # (using bin_size=False to put binary values in the right place)
    records = self.browse(ids)
    inverses_update = defaultdict(list)  # {(field, value): ids}
    common_set_vals = set(LOG_ACCESS_COLUMNS + ['id', 'parent_path'])
    for data, record in zip(data_list, records.with_context(bin_size=False)):
        data['record'] = record
        # DLE P104: test_inherit.py, test_50_search_one2many
        vals = dict({k: v for d in data['inherited'].values() for k, v in d.items()}, **data['stored'])
        set_vals = common_set_vals.union(vals)

        # put None in cache for all fields that are not part of the INSERT
        for field in self._fields.values():
            if not field.store:
                continue
            if field.type in ('one2many', 'many2many'):
                field._update_cache(record, ())
            elif field.name not in set_vals:
                field._update_cache(record, None)

        for fname, value in vals.items():
            field = self._fields[fname]
            if field.type not in ('one2many', 'many2many', 'html'):
                cache_value = field.convert_to_cache(value, record)
                field._update_cache(record, cache_value)
                if field.type in ('many2one', 'many2one_reference') and self.pool.field_inverses[field]:
                    inverses_update[(field, cache_value)].append(record.id)

    for (field, value), record_ids in inverses_update.items():
        field._update_inverses(self.browse(record_ids), value)

    # update parent_path
    records._parent_store_create()

    # protect fields being written against recomputation
    protected = [(data['protected'], data['record']) for data in data_list]
    with self.env.protecting(protected):
        # mark computed fields as todo
        records.modified(self._fields, create=True)

        if other_fields:
            # discard default values from context for other fields
            others = records.with_context(clean_context(self.env.context))
            for field in sorted(other_fields, key=attrgetter('_sequence')):
                field.create([
                    (other, data['stored'][field.name])
                    for other, data in zip(others, data_list)
                    if field.name in data['stored']
                ])

            # mark fields to recompute
            records.modified([field.name for field in other_fields], create=True)

        # check Python constraints for stored fields
        records._validate_fields(name for data in data_list for name in data['stored'])
        records.check_access('create')
    return records
def _compute_field_value(self, field: Field) -> None:
    """ Run the compute method of ``field`` on ``self``, then validate the
    computed fields on the database-backed records of ``self``. """
    determine(field.compute, self)

    if not (field.store and any(self._ids)):
        return
    # check constraints of the fields that have been computed
    computed_names = [sibling.name for sibling in self.pool.field_computed[field]]
    self.filtered('id')._validate_fields(computed_names)
def _parent_store_create(self) -> None:
    """ Set the parent_path field on ``self`` after its creation.

    The path is computed in SQL as the parent's parent_path followed by
    "<id>/", and the resulting values are propagated into the cache.
    """
    if not self._parent_store:
        return

    updated = self.env.execute_query(SQL(
        """ UPDATE %(table)s node
            SET parent_path=concat((
                    SELECT parent.parent_path
                    FROM %(table)s parent
                    WHERE parent.id=node.%(parent)s
                ), node.id, '/')
            WHERE node.id IN %(ids)s
            RETURNING node.id, node.parent_path """,
        table=SQL.identifier(self._table),
        parent=SQL.identifier(self._parent_name),
        ids=tuple(self.ids),
    ))

    # update the cache of updated nodes, and determine what to recompute
    field = self._fields['parent_path']
    for id_, path in updated:
        field._update_cache(self.browse(id_), path)
def _parent_store_update_prepare(self, vals_list: list[ValuesType]) -> Self:
    """ Return the records in ``self`` that must update their parent_path
    field. This must be called before updating the parent field.

    :param vals_list: one dict of write values per record in ``self``;
        only entries that set ``self._parent_name`` are considered.
    """
    if not self._parent_store:
        return self.browse()

    # associate each new parent_id to its corresponding record ids
    parent_to_ids = defaultdict(list)
    for id_, vals in zip(self._ids, vals_list):
        if self._parent_name in vals:
            parent_to_ids[vals[self._parent_name]].append(id_)

    if not parent_to_ids:
        return self.browse()

    # flush pending parent values so the SQL comparison below sees them
    self.flush_recordset([self._parent_name])

    # return the records for which the parent field will change
    sql_parent = SQL.identifier(self._parent_name)
    conditions = []
    for parent_id, ids in parent_to_ids.items():
        if parent_id:
            condition = SQL('(%s != %s OR %s IS NULL)', sql_parent, parent_id, sql_parent)
        else:
            # new parent is NULL: only records that currently have a parent change
            condition = SQL('%s IS NOT NULL', sql_parent)
        conditions.append(SQL('("id" IN %s AND %s)', tuple(ids), condition))

    rows = self.env.execute_query(SQL(
        "SELECT id FROM %s WHERE %s ORDER BY id",
        SQL.identifier(self._table),
        SQL(" OR ").join(conditions),
    ))
    return self.browse(row[0] for row in rows)
def _parent_store_update(self) -> None:
    """ Update the parent_path field of ``self`` (and of all their
    descendants) after their parent field has been written.

    :raise UserError: if moving a record under one of its own descendants
        would create a cycle in the hierarchy.
    """
    for parent, records in self.grouped(self._parent_name).items():
        # determine new prefix of parent_path of records
        prefix = parent.parent_path or ""

        # check for recursion
        if prefix:
            parent_ids = {int(label) for label in prefix.split('/')[:-1]}
            if not parent_ids.isdisjoint(records._ids):
                raise UserError(_("Recursion Detected."))

        # update parent_path of all records and their descendants
        updated = dict(self.env.execute_query(SQL(
            """ UPDATE %(table)s child
                SET parent_path = concat(%(prefix)s, substr(child.parent_path,
                        length(node.parent_path) - length(node.id || '/') + 1))
                FROM %(table)s node
                WHERE node.id IN %(ids)s
                AND child.parent_path LIKE concat(node.parent_path, %(wildcard)s)
                RETURNING child.id, child.parent_path """,
            table=SQL.identifier(self._table),
            prefix=prefix,
            ids=tuple(records.ids),
            wildcard='%',
        )))

        # update the cache of updated nodes, and determine what to recompute
        field = self._fields['parent_path']
        for id_, path in updated.items():
            field._update_cache(self.browse(id_), path)
        records = self.browse(updated)
        records.modified(['parent_path'])
def _clean_properties(self) -> None:
    """ Drop every property value on ``self`` whose name no longer appears
    in the corresponding properties definition. """
    properties_fields = [
        (fname, field)
        for fname, field in self._fields.items()
        if field.type == 'properties'
    ]
    for fname, field in properties_fields:
        for record in self:
            current = record[fname]._values
            if not current:
                continue

            definition = field._get_properties_definition(record)
            valid_names = {entry['name'] for entry in definition}
            kept = {name: value for name, value in current.items() if name in valid_names}
            # only write back when at least one property was dropped
            if len(kept) != len(current):
                record[fname] = kept
5068 def _validate_properties_definition(self, properties_definition, field):
5069 """Allow to validate additional properties attributes."""
5071 def _additional_allowed_keys_properties_definition(self):
5072 """Allow to add more allowed key for properties."""
5073 return ()
5075 def _convert_to_cache_properties_definition(self, value):
5076 """Allow to patch `convert_to_cache` of the properties definition."""
5077 return value
5079 def _convert_to_column_properties_definition(self, value):
5080 """Allow to patch `convert_to_column` of the properties definition."""
5081 return value
def _load_records_write(self, values):
    """ Write ``values`` on the single record ``self`` during data loading.

    Properties values are written in a second pass, merged with the
    existing ones, so that the (possibly updated) definition is in effect.
    """
    self.ensure_one()
    to_write = {}  # defer the properties write to avoid using the old definition if it changed
    for fname in list(values):
        if fname not in self._fields or self._fields[fname].type != 'properties':
            continue
        field_converter = self._fields[fname].convert_to_cache
        to_write[fname] = dict(self[fname]._values or {}, **field_converter(values.pop(fname), self, validate=False))

    self.write(values)
    if to_write:
        self.write(to_write)
        # Because we don't know which properties were linked to which definition,
        # we can now clean properties (note that it is not mandatory, we could wait
        # for the client to change the record in a Form view)
        self._clean_properties()
def _load_records_create(self, vals_list):
    """ Create records during data loading, dropping stale property values
    on models that define a properties field. """
    records = self.create(vals_list)
    model_has_properties = any(
        field.type == 'properties' for field in self._fields.values()
    )
    if model_has_properties:
        records._clean_properties()
    return records
def _load_records(self, data_list, update=False):
    """ Create or update records of this model, and assign XMLIDs.

    :param data_list: list of dicts with keys `xml_id` (XMLID to
        assign), `noupdate` (flag on XMLID), `values` (field values)
    :param update: should be ``True`` when upgrading a module

    :return: the records corresponding to ``data_list``
    :raise ValidationError: if a record has neither id nor xml_id while
        updating, or if an xml_id points to a record of another model
    """
    original_self = self.browse()

    imd = self.env['ir.model.data'].sudo()

    # The algorithm below partitions 'data_list' into three sets: the ones
    # to create, the ones to update, and the others. For each set, we assign
    # data['record'] for each data. All those records are then retrieved for
    # the result.

    # determine existing xml_ids
    xml_ids = [data['xml_id'] for data in data_list if data.get('xml_id')]
    existing = {
        ("%s.%s" % row[1:3]): row
        for row in imd._lookup_xmlids(xml_ids, self)
    }

    # determine which records to create and update
    to_create = []  # list of data
    to_update = []  # list of data
    imd_data_list = []  # list of data for _update_xmlids()

    for data in data_list:
        xml_id = data.get('xml_id')
        if not xml_id:
            vals = data['values']
            if vals.get('id'):
                data['record'] = self.browse(vals['id'])
                to_update.append(data)
            elif not update:
                to_create.append(data)
            else:
                raise ValidationError(_("Cannot update a record without specifying its id or xml_id"))
            continue
        row = existing.get(xml_id)
        if not row:
            to_create.append(data)
            continue
        d_id, d_module, d_name, d_model, d_res_id, d_noupdate, r_id = row
        if self._name != d_model:
            raise ValidationError(  # pylint: disable=missing-gettext
                f"For external id {xml_id} "
                f"when trying to create/update a record of model {self._name} "
                f"found record of different model {d_model} ({d_id})"
            )
        record = self.browse(d_res_id)
        if r_id:
            data['record'] = record
            imd_data_list.append(data)
            # noupdate entries are left untouched during a module upgrade
            if not (update and d_noupdate):
                to_update.append(data)
        else:
            # stale xml_id: the referenced record no longer exists
            imd.browse(d_id).unlink()
            to_create.append(data)

    # update existing records
    for data in to_update:
        data['record']._load_records_write(data['values'])

    # check for records to create with an XMLID from another module
    module = self.env.context.get('install_module')
    if module:
        prefix = module + "."
        for data in to_create:
            if data.get('xml_id') and not data['xml_id'].startswith(prefix) and not self.env.context.get('foreign_record_to_create'):
                _logger.warning("Creating record %s in module %s.", data['xml_id'], module)

    if self.env.context.get('import_file'):
        existing_modules = self.env['ir.module.module'].sudo().search([]).mapped('name')
        for data in to_create:
            xml_id = data.get('xml_id')
            if xml_id and not data.get('noupdate'):
                module_name, sep, record_id = xml_id.partition('.')
                if sep and module_name in existing_modules:
                    raise UserError(
                        _("The record %(xml_id)s has the module prefix %(module_name)s. This is the part before the '.' in the external id. Because the prefix refers to an existing module, the record would be deleted when the module is upgraded. Use either no prefix and no dot or a prefix that isn't an existing module. For example, __import__, resulting in the external id __import__.%(record_id)s.",
                        xml_id=xml_id, module_name=module_name, record_id=record_id))

    # create records
    if to_create:
        records = self._load_records_create([data['values'] for data in to_create])
        for data, record in zip(to_create, records):
            data['record'] = record
            if data.get('xml_id'):
                # add XML ids for parent records that have just been created
                for parent_model, parent_field in self._inherits.items():
                    if not data['values'].get(parent_field):
                        imd_data_list.append({
                            'xml_id': f"{data['xml_id']}_{parent_model.replace('.', '_')}",
                            'record': record[parent_field],
                            'noupdate': data.get('noupdate', False),
                        })
                imd_data_list.append(data)

    # create or update XMLIDs
    imd._update_xmlids(imd_data_list, update)

    return original_self.concat(*(data['record'] for data in data_list))
def _check_qorder(self, word: str) -> None:
    """ Raise a UserError when ``word`` is not a valid "order" specification. """
    if regex_order.match(word) is None:
        raise UserError(_(
            "Invalid \"order\" specified (%s)."
            " A valid \"order\" specification is a comma-separated list of valid field names"
            " (optionally followed by asc/desc for the direction)",
            word,
        ))
def _order_to_sql(self, order: str, query: Query, alias: (str | None) = None,
                  reverse: bool = False) -> SQL:
    """ Return an :class:`SQL` object that represents the given ORDER BY
    clause, without the ORDER BY keyword. The method also checks whether
    the fields in the order are accessible for reading.

    :param order: comma-separated order specification; falls back on ``self._order``
    :param query: the query being built (used for joins and group-by terms)
    :param alias: table alias qualifying the field names (default: model table)
    :param reverse: invert each term's direction and NULLS placement
    """
    order = order or self._order
    if not order:
        return SQL()
    self._check_qorder(order)

    alias = alias or self._table

    terms = []
    for order_part in order.split(','):
        order_match = regex_order.match(order_part)
        assert order_match is not None, "No match found"
        field_name = order_match['field']

        direction = (order_match['direction'] or '').upper()
        nulls = (order_match['nulls'] or '').upper()
        if reverse:
            direction = 'ASC' if direction == 'DESC' else 'DESC'
            if nulls:
                nulls = 'NULLS LAST' if nulls == 'NULLS FIRST' else 'NULLS FIRST'

        # only pass through well-formed modifiers; anything else becomes empty SQL
        sql_direction = SQL(direction) if direction in ('ASC', 'DESC') else SQL()
        sql_nulls = SQL(nulls) if nulls in ('NULLS FIRST', 'NULLS LAST') else SQL()

        if property_name := order_match['property']:
            # field_name is an expression, e.g. "props_field.property"
            field_name = f"{field_name}.{property_name}"
        term = self._order_field_to_sql(alias, field_name, sql_direction, sql_nulls, query)
        if term:
            terms.append(term)

    return SQL(", ").join(terms)
def _order_field_to_sql(self, alias: str, field_name: str, direction: SQL,
                        nulls: SQL, query: Query) -> SQL:
    """ Return an :class:`SQL` object that represents the ordering by the
    given field. The method also checks whether the field is accessible for
    reading.

    :param direction: one of ``SQL("ASC")``, ``SQL("DESC")``, ``SQL()``
    :param nulls: one of ``SQL("NULLS FIRST")``, ``SQL("NULLS LAST")``, ``SQL()``
    """
    # field_name is an expression
    fname, property_name = parse_field_expr(field_name)
    field = self._fields.get(fname)
    if not field:
        raise ValueError(f"Invalid field {fname!r} on model {self._name!r}")

    if field.type == 'many2one':
        # guard against infinite recursion when comodels order by one another
        seen = self.env.context.get('__m2o_order_seen', ())
        if field in seen:
            return SQL()
        self = self.with_context(__m2o_order_seen=frozenset((field, *seen)))

        # figure out the applicable order_by for the m2o
        # special case: ordering by "x_id.id" doesn't recurse on x_id's comodel
        comodel = self.env[field.comodel_name]
        if property_name == 'id':
            coorder = 'id'
            sql_field = self._field_to_sql(alias, fname, query)
        else:
            coorder = comodel._order
            sql_field = self._field_to_sql(alias, field_name, query)

        if coorder == 'id':
            query._order_groupby.append(sql_field)
            return SQL("%s %s %s", sql_field, direction, nulls)

        # instead of ordering by the field's raw value, use the comodel's
        # order on many2one values
        terms = []
        if nulls.code == 'NULLS FIRST':
            terms.append(SQL("%s IS NOT NULL", sql_field))
        elif nulls.code == 'NULLS LAST':
            terms.append(SQL("%s IS NULL", sql_field))

        # LEFT JOIN the comodel table, in order to include NULL values, too
        _comodel, coalias = field.join(self, alias, query)

        # delegate the order to the comodel
        reverse = direction.code == 'DESC'
        term = comodel._order_to_sql(coorder, query, alias=coalias, reverse=reverse)
        if term:
            terms.append(term)
        return SQL(", ").join(terms)

    sql_field = self._field_to_sql(alias, field_name, query)
    if field.type == 'boolean':
        # NULL booleans must sort like FALSE
        sql_field = SQL("COALESCE(%s, FALSE)", sql_field)

    query._order_groupby.append(sql_field)

    return SQL("%s %s %s", sql_field, direction, nulls)
@api.model
def _search(
    self,
    domain: DomainType,
    offset: int = 0,
    limit: int | None = None,
    order: str | None = None,
    *,
    active_test: bool = True,
    bypass_access: bool = False,
) -> Query:
    """
    Private implementation of search() method.

    No default order is applied when the method is invoked without parameter ``order``.

    :return: a :class:`Query` object that represents the matching records

    This method may be overridden to modify the domain being searched, or to
    do some post-filtering of the resulting query object. Be careful with
    the latter option, though, as it might hurt performance. Indeed, by
    default the returned query object is not actually executed, and it can
    be injected as a value in a domain in order to generate sub-queries.

    The `active_test` flag specifies whether to filter only active records.
    The `bypass_access` controls whether or not permissions should be
    checked on the model and record rules should be applied.
    """
    check_access = not (self.env.su or bypass_access)
    if check_access:
        self.browse().check_access('read')

    domain = Domain(domain)
    # filter out inactive records unless they were explicitly asked for
    if (
        self._active_name
        and active_test
        and self.env.context.get('active_test', True)
        and not any(leaf.field_expr == self._active_name for leaf in domain.iter_conditions())
    ):
        domain &= Domain(self._active_name, '=', True)

    # build the query
    domain = domain.optimize_full(self)
    if domain.is_false():
        # short-circuit: empty result, no SQL needed
        return self.browse()._as_query()
    query = Query(self.env, self._table, self._table_sql)
    if not domain.is_true():
        query.add_where(domain._to_sql(self, self._table, query))

    # security access domain (record rules)
    if check_access:
        self_sudo = self.sudo().with_context(active_test=False)
        sec_domain = self.env['ir.rule']._compute_domain(self._name, 'read')
        sec_domain = sec_domain.optimize_full(self_sudo)
        if sec_domain.is_false():
            return self.browse()._as_query()
        if not sec_domain.is_true():
            query.add_where(sec_domain._to_sql(self_sudo, self._table, query))

    # add order and limits
    if order:
        query.order = self._order_to_sql(order, query)

    # In RPC, None is not available; False is used instead to mean "no limit"
    # Note: True is kept for backward-compatibility (treated as 1)
    if limit is not None and limit is not False:
        query.limit = limit
    if offset is not None:
        query.offset = offset

    return query
5394 def _as_query(self, ordered: bool = True) -> Query:
5395 """ Return a :class:`Query` that corresponds to the recordset ``self``.
5396 This method is convenient for making a query object with a known result.
5398 :param ordered: whether the recordset order must be enforced by the query
5399 """
5400 query = Query(self.env, self._table, self._table_sql)
5401 query.set_result_ids(self._ids, ordered)
5402 return query
5404 def copy_data(self, default: ValuesType | None = None) -> list[ValuesType]:
5405 """
5406 Copy given record's data with all its fields values
5408 :param default: field values to override in the original values of the copied record
5409 :return: list of dictionaries containing all the field values
5410 """
5411 vals_list = []
5412 default = dict(default or {})
5413 # avoid recursion through already copied records in case of circular relationship
5414 if '__copy_data_seen' not in self.env.context:
5415 self = self.with_context(__copy_data_seen=defaultdict(set))
5417 # build a black list of fields that should not be copied
5418 blacklist = set(MAGIC_COLUMNS + ['parent_path'])
5419 whitelist = set(name for name, field in self._fields.items() if not field.inherited)
5421 def blacklist_given_fields(model):
5422 # blacklist the fields that are given by inheritance
5423 for parent_model, parent_field in model._inherits.items():
5424 blacklist.add(parent_field)
5425 if parent_field in default:
5426 # all the fields of 'parent_model' are given by the record:
5427 # default[parent_field], except the ones redefined in self
5428 blacklist.update(set(self.env[parent_model]._fields) - whitelist)
5429 else:
5430 blacklist_given_fields(self.env[parent_model])
5432 blacklist_given_fields(self)
5434 fields_to_copy = {name: field
5435 for name, field in self._fields.items()
5436 if field.copy and name not in default and name not in blacklist}
5438 for record in self:
5439 seen_map = self.env.context['__copy_data_seen']
5440 if record.id in seen_map[record._name]: 5440 ↛ 5441line 5440 didn't jump to line 5441 because the condition on line 5440 was never true
5441 vals_list.append(None)
5442 continue
5443 seen_map[record._name].add(record.id)
5445 vals = default.copy()
5447 for name, field in fields_to_copy.items():
5448 if field.type == 'one2many':
5449 # duplicate following the order of the ids because we'll rely on
5450 # it later for copying translations in copy_translation()!
5451 lines = record[name].sorted(key='id').copy_data()
5452 # the lines are duplicated using the wrong (old) parent, but then are
5453 # reassigned to the correct one thanks to the (Command.CREATE, 0, ...)
5454 vals[name] = [Command.create(line) for line in lines if line]
5455 elif field.type == 'many2many':
5456 # copy only links that we can read, otherwise the write will fail
5457 vals[name] = [Command.set(record[name]._filtered_access('read').ids)]
5458 else:
5459 vals[name] = field.convert_to_write(record[name], record)
5460 vals_list.append(vals)
5461 return vals_list
5463 def copy_translations(self, new: Self, excluded: Collection[str] = ()) -> None:
5464 """ Recursively copy the translations from original to new record
5466 :param self: the original record
5467 :param new: the new record (copy of the original one)
5468 :param excluded: a container of user-provided field names
5469 """
5470 old = self
5471 # avoid recursion through already copied records in case of circular relationship
5472 if '__copy_translations_seen' not in old.env.context:
5473 old = old.with_context(__copy_translations_seen=defaultdict(set))
5474 seen_map = old.env.context['__copy_translations_seen']
5475 if old.id in seen_map[old._name]: 5475 ↛ 5476line 5475 didn't jump to line 5476 because the condition on line 5475 was never true
5476 return
5477 seen_map[old._name].add(old.id)
5478 valid_langs = set(code for code, _ in self.env['res.lang'].get_installed()) | {'en_US'}
5480 for name, field in old._fields.items():
5481 if not field.copy:
5482 continue
5484 if field.inherited and field.related.split('.')[0] in excluded:
5485 # inherited fields that come from a user-provided parent record
5486 # must not copy translations, as the parent record is not a copy
5487 # of the old parent record
5488 continue
5490 if field.type == 'one2many' and field.name not in excluded:
5491 # we must recursively copy the translations for o2m; here we
5492 # rely on the order of the ids to match the translations as
5493 # foreseen in copy_data()
5494 old_lines = old[name].sorted(key='id')
5495 new_lines = new[name].sorted(key='id')
5496 for (old_line, new_line) in zip(old_lines, new_lines):
5497 # don't pass excluded as it is not about those lines
5498 old_line.copy_translations(new_line)
5500 elif field.translate and field.store and name not in excluded and old[name]:
5501 # for translatable fields we copy their translations
5502 old_stored_translations = field._get_stored_translations(old)
5503 if not old_stored_translations: 5503 ↛ 5504line 5503 didn't jump to line 5504 because the condition on line 5503 was never true
5504 continue
5505 lang = self.env.lang or 'en_US'
5506 if field.translate is True:
5507 new.update_field_translations(name, {
5508 k: v for k, v in old_stored_translations.items() if k in valid_langs and k != lang
5509 })
5510 else:
5511 old_translations = {
5512 k: old_stored_translations.get(f'_{k}', v)
5513 for k, v in old_stored_translations.items()
5514 if k in valid_langs
5515 }
5516 # {from_lang_term: {lang: to_lang_term}
5517 translation_dictionary = field.get_translation_dictionary(
5518 old_translations.pop(lang, old_translations['en_US']),
5519 old_translations
5520 )
5521 # {lang: {old_term: new_term}}
5522 translations = defaultdict(dict)
5523 for from_lang_term, to_lang_terms in translation_dictionary.items():
5524 for lang, to_lang_term in to_lang_terms.items():
5525 translations[lang][from_lang_term] = to_lang_term
5526 new.update_field_translations(name, translations)
5528 def copy(self, default: ValuesType | None = None) -> Self:
5529 """ Duplicate record ``self`` updating it with default values.
5531 :param default: dictionary of field values to override in the
5532 original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
5533 :returns: new records
5535 """
5536 vals_list = self.with_context(active_test=False).copy_data(default)
5537 new_records = self.create(vals_list)
5538 for old_record, new_record in zip(self, new_records):
5539 old_record.copy_translations(new_record, excluded=default or ())
5540 return new_records
5542 @api.private
5543 def exists(self) -> Self:
5544 """ The subset of records in ``self`` that exist.
5545 It can be used as a test on records::
5547 if record.exists():
5548 ...
5550 By convention, new records are returned as existing.
5551 """
5552 new_ids, ids = partition(lambda i: isinstance(i, NewId), self._ids)
5553 if not ids:
5554 return self
5555 query = Query(self.env, self._table, self._table_sql)
5556 query.add_where(SQL("%s IN %s", SQL.identifier(self._table, 'id'), tuple(ids)))
5557 real_ids = (id_ for [id_] in self.env.execute_query(query.select()))
5558 valid_ids = {*real_ids, *new_ids}
5559 return self.browse(i for i in self._ids if i in valid_ids)
5561 @api.private
5562 def lock_for_update(self, *, allow_referencing: bool = False) -> None:
5563 """ Grab an exclusive write-lock to the rows with the given ids.
5565 This avoids blocking processing on the records due to concurrent
5566 modifications. If all records couldn't be locked, a `LockError`
5567 exception is raised.
5569 :param allow_referencing: Acquire a row lock which allows for other
5570 transactions to reference this record. Use only when modifying
5571 values that are not identifiers.
5572 :raises: ``LockError`` when some records could not be locked
5573 """
5574 ids = {id_ for id_ in self._ids if id_}
5575 if not ids: 5575 ↛ 5576line 5575 didn't jump to line 5576 because the condition on line 5575 was never true
5576 return
5577 query = Query(self.env, self._table, self._table_sql)
5578 query.add_where(SQL("%s IN %s", SQL.identifier(self._table, 'id'), tuple(ids)))
5579 # Use SKIP LOCKED instead of NOWAIT because the later aborts the
5580 # transaction and we do not want to use SAVEPOINTS.
5581 if allow_referencing: 5581 ↛ 5584line 5581 didn't jump to line 5584 because the condition on line 5581 was always true
5582 lock_sql = SQL("FOR NO KEY UPDATE SKIP LOCKED")
5583 else:
5584 lock_sql = SQL("FOR UPDATE SKIP LOCKED")
5585 rows = self.env.execute_query(SQL("%s %s", query.select(), lock_sql))
5586 if len(rows) != len(ids): 5586 ↛ 5587line 5586 didn't jump to line 5587 because the condition on line 5586 was never true
5587 raise LockError(self.env._("Cannot grab a lock on records"))
5589 @api.private
5590 def try_lock_for_update(self, *, allow_referencing: bool = False, limit: int | None = None) -> Self:
5591 """ Grab an exclusive write-lock on some rows with the given ids.
5593 Skip locked records and browse the records that could be locked.
5595 :param allow_referencing: Acquire a row lock which allows for other
5596 transactions to reference this record. Use only when modifying
5597 values that are not identifiers.
5598 :param limit: The maximum number of rows to lock
5599 :return: The recordset of locked records
5600 """
5601 new_ids, ids = partition(lambda i: isinstance(i, NewId), self._ids)
5602 if limit is not None:
5603 if len(new_ids) >= limit:
5604 return self.browse(new_ids[:limit])
5605 # keep the order of ids when trying to lock
5606 query = self.browse(ids)._as_query(ordered=True)
5607 query.limit = limit - len(new_ids)
5608 else:
5609 query = Query(self.env, self._table, self._table_sql)
5610 query.add_where(SQL("%s IN %s", SQL.identifier(self._table, 'id'), tuple(ids)))
5611 if not ids:
5612 return self
5613 if allow_referencing:
5614 lock_sql = SQL("FOR NO KEY UPDATE SKIP LOCKED")
5615 else:
5616 lock_sql = SQL("FOR UPDATE SKIP LOCKED")
5617 sql = SQL("%s %s", query.select(), lock_sql)
5618 real_ids = (id_ for [id_] in self.env.execute_query(sql))
5619 valid_ids = {*real_ids, *new_ids}
5620 return self.browse(i for i in self._ids if i in valid_ids)
    def _has_cycle(self, field_name=None) -> bool:
        """
        Return whether the records in ``self`` are in a loop by following the
        given relationship of the field.
        By default the **parent** field is used as the relationship.

        Note that since the method does not use EXCLUSIVE LOCK for the sake of
        performance, loops may still be created by concurrent transactions.

        :param field_name: optional field name (default: ``self._parent_name``)
        :return: **True** if a loop was found, **False** otherwise.
        :raises ValueError: if ``field_name`` is unknown, or is not a stored
            many2one/many2many relation on this same model
        """
        if not field_name:
            field_name = self._parent_name

        field = self._fields.get(field_name)
        if not field:
            raise ValueError(f'Invalid field_name: {field_name!r}')

        # cycle detection only makes sense on a stored self-relation
        if not (
            field.type in ('many2many', 'many2one')
            and field.comodel_name == self._name
            and field.store
        ):
            raise ValueError(f'Field must be a many2one or many2many relation on itself: {field_name!r}')

        if not self.ids:
            return False

        # must ignore 'active' flag, ir.rules, etc.
        # direct recursive SQL query with cycle detection for performance
        self.flush_model([field_name])
        if field.type == 'many2many':
            # walk the m2m relation table directly
            relation = field.relation
            column1 = field.column1
            column2 = field.column2
        else:
            # many2one: the model table itself relates 'id' -> field column
            relation = self._table
            column1 = 'id'
            column2 = field_name
        cr = self.env.cr
        # recursive CTE: compute all (source, destination) reachability pairs
        # starting from self.ids; a row with source = destination is a cycle
        cr.execute(SQL(
            """
            WITH RECURSIVE __reachability AS (
                SELECT %(col1)s AS source, %(col2)s AS destination
                FROM %(rel)s
                WHERE %(col1)s IN %(ids)s AND %(col2)s IS NOT NULL
                UNION
                SELECT r.source, t.%(col2)s
                FROM __reachability r
                JOIN %(rel)s t ON r.destination = t.%(col1)s AND t.%(col2)s IS NOT NULL
            )
            SELECT 1 FROM __reachability
            WHERE source = destination
            LIMIT 1
            """,
            ids=tuple(self.ids),
            rel=SQL.identifier(relation),
            col1=SQL.identifier(column1),
            col2=SQL.identifier(column2),
        ))
        return bool(cr.fetchone())
    @api.deprecated("Deprecated since 18.0, use _has_cycle() instead")
    def _check_recursion(self, parent=None):
        # Legacy shim: True means "no cycle found" (inverse of _has_cycle).
        return not self._has_cycle(parent)
    @api.deprecated("Deprecated since 18.0, use _has_cycle() instead")
    def _check_m2m_recursion(self, field_name):
        # Legacy shim: True means "no cycle found" (inverse of _has_cycle).
        return not self._has_cycle(field_name)
5693 def _get_external_ids(self) -> dict[IdType, list[str]]:
5694 """Retrieve the External ID(s) of any database record.
5696 **Synopsis**: ``_get_external_ids() -> { 'id': ['module.external_id'] }``
5698 :return: map of ids to the list of their fully qualified External IDs
5699 in the form ``module.key``, or an empty list when there's no External
5700 ID for a record, e.g.::
5702 { 'id': ['module.ext_id', 'module.ext_id_bis'],
5703 'id2': [] }
5704 """
5705 result = defaultdict(list)
5706 domain: DomainType = [('model', '=', self._name), ('res_id', 'in', self.ids)]
5707 for data in self.env['ir.model.data'].sudo().search_read(domain, ['module', 'name', 'res_id'], order='id'):
5708 result[data['res_id']].append('%(module)s.%(name)s' % data)
5709 return {
5710 record.id: result[record._origin.id]
5711 for record in self
5712 }
5714 def get_external_id(self) -> dict[IdType, str]:
5715 """Retrieve the External ID of any database record, if there
5716 is one. This method works as a possible implementation
5717 for a function field, to be able to add it to any
5718 model object easily, referencing it as ``Model.get_external_id``.
5720 When multiple External IDs exist for a record, only one
5721 of them is returned (randomly).
5723 :return: map of ids to their fully qualified XML ID,
5724 defaulting to an empty string when there's none
5725 (to be usable as a function field),
5726 e.g.::
5728 { 'id': 'module.ext_id',
5729 'id2': '' }
5730 """
5731 results = self._get_external_ids()
5732 return {key: val[0] if val else ''
5733 for key, val in results.items()}
    @classmethod
    def is_transient(cls) -> bool:
        """ Return whether the model is transient.

        See :class:`TransientModel`.
        """
        # class-level flag set by the model's definition
        return cls._transient
5744 @api.model
5745 @api.readonly
5746 def search_read(
5747 self,
5748 domain: DomainType | None = None,
5749 fields: Sequence[str] | None = None,
5750 offset: int = 0,
5751 limit: int | None = None,
5752 order: str | None = None,
5753 **read_kwargs,
5754 ) -> list[ValuesType]:
5755 """ Perform a :meth:`search_fetch` followed by a :meth:`_read_format`.
5757 :param domain: Search domain, see ``args`` parameter in :meth:`search`.
5758 Defaults to an empty domain that will match all records.
5759 :param fields: List of fields to read, see ``fields`` parameter in :meth:`read`.
5760 Defaults to all fields.
5761 :param offset: Number of records to skip, see ``offset`` parameter in :meth:`search`.
5762 Defaults to 0.
5763 :param limit: Maximum number of records to return, see ``limit`` parameter in :meth:`search`.
5764 Defaults to no limit.
5765 :param order: Columns to sort result, see ``order`` parameter in :meth:`search`.
5766 Defaults to no sort.
5767 :param read_kwargs: All read keywords arguments used to call
5768 ``read(..., **read_kwargs)`` method e.g. you can use
5769 ``search_read(..., load='')`` in order to avoid computing display_name
5770 :return: List of dictionaries containing the asked fields.
5771 """
5772 if not fields: 5772 ↛ 5773line 5772 didn't jump to line 5773 because the condition on line 5772 was never true
5773 fields = list(self.fields_get(attributes=()))
5774 records = self.search_fetch(domain or [], fields, offset=offset, limit=limit, order=order)
5776 # Method _read_format() ignores 'active_test', but it would forward it
5777 # to any downstream search call(e.g. for x2m or computed fields), and
5778 # this is not the desired behavior. The flag was presumably only meant
5779 # for the main search().
5780 if 'active_test' in self.env.context:
5781 context = dict(self.env.context)
5782 del context['active_test']
5783 records = records.with_context(context)
5785 return records._read_format(fnames=fields, **read_kwargs)
5787 @api.deprecated("Deprecated since 19.0, use action_archive or action_unarchive")
5788 def toggle_active(self):
5789 "Inverses the value of :attr:`active` on the records in ``self``."
5790 assert self._active_name, f"No 'active' field on model {self._name}"
5791 active_recs = self.filtered(self._active_name)
5792 active_recs.action_archive()
5793 (self - active_recs).action_unarchive()
5795 def action_archive(self):
5796 """Set :attr:`active` to ``False`` on a recordset for active records.
5798 Note, you probably want to override `write()` method if you want to take
5799 action once the active field changes.
5800 """
5801 field_name = self._active_name
5802 assert field_name, f"No 'active' field on model {self._name}"
5803 active_recs = self.filtered(lambda record: record[field_name])
5804 active_recs[field_name] = False
5806 def action_unarchive(self):
5807 """Set :attr:`active` to ``True`` on a recordset for inactive records.
5809 Note, you probably want to override `write()` method if you want to take
5810 action once the active field changes.
5811 """
5812 field_name = self._active_name
5813 assert field_name, f"No 'active' field on model {self._name}"
5814 inactive_recs = self.filtered(lambda record: not record[field_name])
5815 inactive_recs[field_name] = True
    def _register_hook(self) -> None:
        """ Hook executed right after the registry is built; no-op by default,
        meant to be overridden to install model-specific runtime behaviour. """
    def _unregister_hook(self) -> None:
        """ Clean up what `~._register_hook` has done; no-op by default. """
5823 def _get_redirect_suggested_company(self) -> BaseModel | typing.Literal[False]:
5824 """Return the suggested company to be set on the context
5825 in case of a URL redirection to the record. To avoid multi
5826 company issues when clicking on a shared link, this
5827 could be called to try setting the most suited company on
5828 the allowed_company_ids in the context. This method can be
5829 overridden, for example on the hr.leave model, where the
5830 most suited company is the company of the leave type, as
5831 specified by the ir.rule.
5832 """
5833 if 'company_id' in self:
5834 return self.company_id
5835 elif 'company_ids' in self:
5836 return (self.company_ids & self.env.user.company_ids)[:1]
5837 return False
    def _can_return_content(
        self, field_name: str | None = None, access_token: str | None = None
    ) -> bool:
        """Determine whether one can export a file or an image from a field of
        record ``self``, even if ``self`` is not accessible to the current user.
        If so, the record will be ``sudo()``-ed to access the corresponding file
        or image.

        :param field_name: image field name to check the access to
        :param access_token: access token to use instead of the
            access rights and access rules
        :return: whether the extra access is allowed
        """
        self.ensure_one()
        # base implementation never grants extra access; models exposing
        # public content are expected to override this
        return False
5855 #
5856 # Instance creation
5857 #
5858 # An instance represents an ordered collection of records in a given
5859 # execution environment. The instance object refers to the environment, and
5860 # the records themselves are represented by their cache dictionary. The 'id'
5861 # of each record is found in its corresponding cache dictionary.
5862 #
5863 # This design has the following advantages:
5864 # - cache access is direct and thus fast;
5865 # - one can consider records without an 'id' (see new records);
5866 # - the global cache is only an index to "resolve" a record 'id'.
5867 #
    def __init__(self, env: Environment, ids: tuple[IdType, ...], prefetch_ids: Reversible[IdType]):
        """ Create a recordset instance.

        :param env: an environment
        :param ids: a tuple of record ids
        :param prefetch_ids: a reversible iterable of record ids (for prefetching)
        """
        # no validation here: this constructor is on the hot path and callers
        # (browse, with_env, ...) guarantee the invariants
        self.env = env
        self._ids = ids
        self._prefetch_ids = prefetch_ids
5880 @api.private
5881 def browse(self, ids: int | typing.Iterable[IdType] = ()) -> Self:
5882 """Return a recordset for the ids provided as parameter in the current
5883 environment.
5885 .. code-block:: python
5887 self.browse([7, 18, 12])
5888 res.partner(7, 18, 12)
5889 """
5890 if not ids:
5891 ids = ()
5892 elif ids.__class__ is int:
5893 ids = (ids,)
5894 else:
5895 ids = tuple(ids)
5896 return self.__class__(self.env, ids, ids)
5898 #
5899 # Internal properties, for manipulating the instance's implementation
5900 #
5902 @property
5903 def ids(self) -> list[int]:
5904 """ Return the list of actual record ids corresponding to ``self``. """
5905 if all(self._ids):
5906 return list(self._ids) # already real records
5907 return list(OriginIds(self._ids))
    @property
    @api.deprecated("Deprecated since 19.0, use self.env.cr directly")
    def _cr(self):
        # Legacy alias for the database cursor, kept for backward compatibility.
        return self.env.cr
    @property
    @api.deprecated("Deprecated since 19.0, use self.env.uid directly")
    def _uid(self):
        # Legacy alias for the current user id, kept for backward compatibility.
        return self.env.uid
    @property
    @api.deprecated("Deprecated since 19.0, use self.env.context directly")
    def _context(self):
        # Legacy alias for the environment context, kept for backward compatibility.
        return self.env.context
5924 #
5925 # Conversion methods
5926 #
5928 @api.private
5929 def ensure_one(self) -> Self:
5930 """Verify that the current recordset holds a single record.
5932 :raise odoo.exceptions.ValueError: ``len(self) != 1``
5933 """
5934 try:
5935 # unpack to ensure there is only one value is faster than len when true and
5936 # has a significant impact as this check is largely called
5937 _id, = self._ids
5938 return self
5939 except ValueError:
5940 raise ValueError("Expected singleton: %s" % self)
    @api.private
    def with_env(self, env: Environment) -> Self:
        """Return a new version of this recordset attached to the provided environment.

        .. note::
            The returned recordset has the same prefetch object as ``self``.
        """
        # same ids and prefetch ids, only the environment differs
        return self.__class__(env, self._ids, self._prefetch_ids)
5951 @api.private
5952 def sudo(self, flag: bool = True) -> Self:
5953 """ Return a new version of this recordset with superuser mode enabled or
5954 disabled, depending on `flag`. The superuser mode does not change the
5955 current user, and simply bypasses access rights checks.
5957 .. warning::
5959 Using ``sudo`` could cause data access to cross the
5960 boundaries of record rules, possibly mixing records that
5961 are meant to be isolated (e.g. records from different
5962 companies in multi-company environments).
5964 It may lead to un-intuitive results in methods which select one
5965 record among many - for example getting the default company, or
5966 selecting a Bill of Materials.
5968 .. note::
5970 The returned recordset has the same prefetch object as ``self``.
5972 """
5973 assert isinstance(flag, bool)
5974 if flag == self.env.su:
5975 return self
5976 return self.with_env(self.env(su=flag))
5978 @api.private
5979 def with_user(self, user: BaseModel | IdType) -> Self:
5980 """ Return a new version of this recordset attached to the given user, in
5981 non-superuser mode, unless `user` is the superuser (by convention, the
5982 superuser is always in superuser mode.)
5983 """
5984 if not user: 5984 ↛ 5985line 5984 didn't jump to line 5985 because the condition on line 5984 was never true
5985 return self
5986 return self.with_env(self.env(user=user, su=False))
5988 @api.private
5989 def with_company(self, company: BaseModel | IdType) -> Self:
5990 """ Return a new version of this recordset with a modified context, such that::
5992 result.env.company = company
5993 result.env.companies = self.env.companies | company
5995 .. warning::
5997 When using an unauthorized company for current user,
5998 accessing the company(ies) on the environment may trigger
5999 an AccessError if not done in a sudoed environment.
6000 """
6001 if not company:
6002 # With company = None/False/0/[]/empty recordset: keep current environment
6003 return self
6005 company_id = int(company)
6006 allowed_company_ids = self.env.context.get('allowed_company_ids') or []
6007 if allowed_company_ids and company_id == allowed_company_ids[0]:
6008 return self
6009 # Copy the allowed_company_ids list
6010 # to avoid modifying the context of the current environment.
6011 allowed_company_ids = list(allowed_company_ids)
6012 if company_id in allowed_company_ids: 6012 ↛ 6013line 6012 didn't jump to line 6013 because the condition on line 6012 was never true
6013 allowed_company_ids.remove(company_id)
6014 allowed_company_ids.insert(0, company_id)
6016 return self.with_context(allowed_company_ids=allowed_company_ids)
    @api.private
    def with_context(self, ctx: dict[str, typing.Any] | None = None, /, **overrides) -> Self:
        """ Return a new version of this recordset attached to an extended
        context.

        The extended context is either the provided ``context`` in which
        ``overrides`` are merged or the *current* context in which
        ``overrides`` are merged e.g.::

            # current context is {'key1': True}
            r2 = records.with_context({}, key2=True)
            # -> r2.env.context is {'key2': True}
            r2 = records.with_context(key2=True)
            # -> r2.env.context is {'key1': True, 'key2': True}

        .. note:

            The returned recordset has the same prefetch object as ``self``.
        """  # noqa: RST210
        # merge: a positional dict replaces the current context entirely
        context = dict(ctx if ctx is not None else self.env.context, **overrides)
        if 'force_company' in context:
            warnings.warn(
                "Since 19.0, context key 'force_company' is no longer supported. "
                "Use with_company(company) instead.",
                DeprecationWarning,
            )
        if 'company' in context:
            warnings.warn(
                "Context key 'company' is not recommended, because "
                "of its special meaning in @depends_context.",
            )
        if 'allowed_company_ids' not in context and 'allowed_company_ids' in self.env.context:
            # Force 'allowed_company_ids' to be kept when context is overridden
            # without 'allowed_company_ids'
            context['allowed_company_ids'] = self.env.context['allowed_company_ids']
        return self.with_env(self.env(context=context))
6055 @api.private
6056 def with_prefetch(self, prefetch_ids: Reversible[IdType] | None = None) -> Self:
6057 """ Return a new version of this recordset that uses the given prefetch ids,
6058 or ``self``'s ids if not given.
6059 """
6060 if prefetch_ids is None: 6060 ↛ 6061line 6060 didn't jump to line 6061 because the condition on line 6060 was never true
6061 prefetch_ids = self._ids
6062 return self.__class__(self.env, self._ids, prefetch_ids)
6064 def _update_cache(self, values: ValuesType, validate: bool = True) -> None:
6065 """ Update the cache of ``self`` with ``values``.
6067 :param values: dict of field values, in any format.
6068 :param validate: whether values must be checked
6069 """
6070 self.ensure_one()
6071 fields = self._fields
6072 try:
6073 field_values = [(fields[name], value) for name, value in values.items() if name != 'id']
6074 except KeyError as e:
6075 raise ValueError("Invalid field %r on model %r" % (e.args[0], self._name))
6077 # convert monetary fields after other columns for correct value rounding
6078 for field, value in sorted(field_values, key=lambda item: item[0].write_sequence):
6079 value = field.convert_to_cache(value, self, validate)
6080 field._update_cache(self, value)
6082 # set inverse fields on new records in the comodel
6083 if field.relational:
6084 inv_recs = self[field.name].filtered(lambda r: not r.id)
6085 if not inv_recs:
6086 continue
6087 # we need to adapt the value of the inverse fields to integrate self into it:
6088 # x2many fields should add self, while many2one fields should replace with self
6089 for invf in self.pool.field_inverses[field]:
6090 invf._update_inverse(inv_recs, self)
6092 def _convert_to_record(self, values):
6093 """ Convert the ``values`` dictionary from the cache format to the
6094 record format.
6095 """
6096 return {
6097 name: self._fields[name].convert_to_record(value, self)
6098 for name, value in values.items()
6099 }
6101 def _convert_to_write(self, values):
6102 """ Convert the ``values`` dictionary into the format of :meth:`write`. """
6103 fields = self._fields
6104 result = {}
6105 for name, value in values.items():
6106 if name in fields: 6106 ↛ 6105line 6106 didn't jump to line 6105 because the condition on line 6106 was always true
6107 field = fields[name]
6108 value = field.convert_to_write(value, self)
6109 if not isinstance(value, NewId): 6109 ↛ 6105line 6109 didn't jump to line 6105 because the condition on line 6109 was always true
6110 result[name] = value
6111 return result
6113 #
6114 # Record traversal and update
6115 #
    @typing.overload
    def mapped(self, func: str) -> list[typing.Any] | BaseModel:
        ...

    @typing.overload
    def mapped(self, func: Callable[[Self], T]) -> list[T] | BaseModel:
        ...

    @api.private
    def mapped(self, func: str | Callable[[Self], T]) -> list | BaseModel:
        """Apply ``func`` on all records in ``self``, and return the result as a
        list or a recordset (if ``func`` return recordsets). In the latter
        case, the order of the returned recordset is arbitrary.

        :param func: a function or a dot-separated sequence of field names
        :return: self if func is falsy, result of func applied to all ``self`` records.

        .. code-block:: python3

            # returns a list of summing two fields for each record in the set
            records.mapped(lambda r: r.field1 + r.field2)

        The provided function can be a string to get field values:

        .. code-block:: python3

            # returns a list of names
            records.mapped('name')

            # returns a recordset of partners
            records.mapped('partner_id')

            # returns the union of all partner banks, with duplicates removed
            records.mapped('partner_id.bank_ids')
        """
        if not func:
            return self  # support for an empty path of fields

        if isinstance(func, str):
            # special case: sequence of field names
            *rel_field_names, field_name = func.split('.')
            records = self
            # walk the relational prefix, field by field
            for rel_field_name in rel_field_names:
                records = records[rel_field_name]
            if len(records) > PREFETCH_MAX:
                # fetch fields for all recordset in case we have a recordset
                # that is larger than the prefetch
                records.fetch([field_name])
            field = records._fields[field_name]
            getter = field.__get__
            if field.relational:
                # union of records
                return getter(records)
            return [getter(record) for record in records]

        if self:
            vals = [func(rec) for rec in self]
            if isinstance(vals[0], BaseModel):
                # func returned recordsets: union them into one recordset
                return vals[0].union(*vals)
            return vals
        else:
            # we want to follow-up the comodel from the function
            # so we pass an empty recordset
            vals = func(self)
            return vals if isinstance(vals, BaseModel) else []
6183 @api.private
6184 def filtered(self, func: str | Callable[[Self], bool] | Domain) -> Self:
6185 """Return the records in ``self`` satisfying ``func``.
6187 :param func: a function, Domain or a dot-separated sequence of field names
6188 :return: recordset of records satisfying func, may be empty.
6190 .. code-block:: python3
6192 # only keep records whose company is the current user's
6193 records.filtered(lambda r: r.company_id == user.company_id)
6195 # only keep records whose partner is a company
6196 records.filtered("partner_id.is_company")
6197 """
6198 if not func: 6198 ↛ 6200line 6198 didn't jump to line 6200 because the condition on line 6198 was never true
6199 # align with mapped()
6200 return self
6201 if callable(func):
6202 # normal function
6203 pass
6204 elif isinstance(func, str):
6205 if '.' in func:
6206 return self.browse(rec_id for rec_id, rec in zip(self._ids, self) if any(rec.mapped(func)))
6207 # avoid costly mapped
6208 func = self._fields[func].__get__
6209 elif isinstance(func, Domain): 6209 ↛ 6212line 6209 didn't jump to line 6212 because the condition on line 6209 was always true
6210 return self.filtered_domain(func)
6211 else:
6212 raise TypeError(f"Invalid function {func!r} to filter on {self._name}")
6213 return self.browse(rec_id for rec_id, rec in zip(self._ids, self) if func(rec))
6215 @typing.overload
6216 def grouped(self, key: str) -> dict[typing.Any, Self]:
6217 ...
6219 @typing.overload
6220 def grouped(self, key: Callable[[Self], T]) -> dict[T, Self]:
6221 ...
6223 @api.private
6224 def grouped(self, key: str | Callable[[Self], T]) -> dict[typing.Any, Self]:
6225 """Eagerly groups the records of ``self`` by the ``key``, returning a
6226 dict from the ``key``'s result to recordsets. All the resulting
6227 recordsets are guaranteed to be part of the same prefetch-set.
6229 Provides a convenience method to partition existing recordsets without
6230 the overhead of a :meth:`~._read_group`, but performs no aggregation.
6232 .. note:: unlike :func:`itertools.groupby`, does not care about input
6233 ordering, however the tradeoff is that it can not be lazy
6235 :param key: either a callable from a :class:`Model` to a (hashable)
6236 value, or a field name. In the latter case, it is equivalent
6237 to ``itemgetter(key)`` (aka the named field's value)
6238 """
6239 if isinstance(key, str):
6240 key = itemgetter(key)
6242 collator = defaultdict(list)
6243 for record in self:
6244 collator[key(record)].extend(record._ids)
6246 browse = functools.partial(type(self), self.env, prefetch_ids=self._prefetch_ids)
6247 return {key: browse(tuple(ids)) for key, ids in collator.items()}
6249 @api.private
6250 def filtered_domain(self, domain: DomainType) -> Self:
6251 """Return the records in ``self`` satisfying the domain and keeping the same order.
6253 :param domain: :ref:`A search domain <reference/orm/domains>`.
6254 """
6255 if not self or not domain:
6256 return self
6257 predicate = Domain(domain)._as_predicate(self)
6258 return self.browse(rec_id for rec_id, rec in zip(self._ids, self) if predicate(rec))
6260 @api.private
6261 def sorted(self, key: Callable[[Self], typing.Any] | str | None = None, reverse: bool = False) -> Self:
6262 """Return the recordset ``self`` ordered by ``key``.
6264 :param key:
6265 It can be either of:
6267 * a function of one argument that returns a comparison key for each record
6268 * a string representing a comma-separated list of field names with optional
6269 NULLS (FIRST|LAST), and (ASC|DESC) directions
6270 * ``None``, in which case records are ordered according the default model's order
6271 :param reverse: if ``True``, return the result in reverse order
6273 .. code-block:: python3
6275 # sort records by name
6276 records.sorted(key=lambda r: r.name)
6277 # sort records by name in descending order, then by id
6278 records.sorted('name DESC, id')
6279 # sort records using default order
6280 records.sorted()
6281 """
6282 if len(self) < 2:
6283 return self
6284 if isinstance(key, str):
6285 key = self._sorted_order_to_function(key)
6286 elif key is None:
6287 key = self._sorted_order_to_function(self._order)
6288 ids = tuple(item.id for item in sorted(self, key=key, reverse=reverse))
6289 return self.__class__(self.env, ids, self._prefetch_ids)
    @api.model
    def _sorted_order_to_function(self, order: str) -> Callable[[BaseModel], tuple]:
        """ Build a sort-key function from an order specification such as
        ``'name DESC, id'`` (used by :meth:`sorted`). The returned function
        maps a record to a tuple with one comparator per order part.
        """
        def order_to_function(order_part):
            # parse one "<field>[.<property>] [ASC|DESC] [NULLS FIRST|LAST]" term
            order_match = regex_order.match(order_part)
            if not order_match:
                raise ValueError(f"Invalid order {order!r} to sort")
            field_name = order_match['field']
            property_name = order_match['property']
            reverse = (order_match['direction'] or '').upper() == 'DESC'
            nulls = (order_match['nulls'] or '').upper()
            if nulls:
                nulls_first = nulls == 'NULLS FIRST'
            else:
                # no explicit NULLS clause: place nulls first when descending
                nulls_first = reverse

            field = self._fields[field_name]
            field_expr = f'{field_name}.{property_name}' if property_name else field_name
            if field.type == 'many2one' and (not property_name or property_name == 'id'):
                # many2one values are ordered by the comodel's own order; the
                # context key records visited fields to avoid infinite recursion
                # on mutually ordered many2one fields
                seen = self.env.context.get('__m2o_order_seen_sorted', ())
                if field in seen:
                    return lambda _: None
                comodel = self.env[field.comodel_name].with_context(__m2o_order_seen_sorted=frozenset((field, *seen)))
                func_comodel = comodel._sorted_order_to_function(property_name or comodel._order)

                def getter(rec):
                    value = rec[field_name]
                    if not value:
                        return None
                    return func_comodel(value)
            elif field.relational:
                raise ValueError(f"Invalid order on relational field {order_part!r} to sort")
            elif field.type == 'boolean':
                getter = field.expression_getter(field_expr)
            else:
                raw_getter = field.expression_getter(field_expr)

                def getter(rec):
                    # map False (the ORM's "no value") to None so the
                    # NULLS FIRST/LAST logic applies to it
                    value = raw_getter(rec)
                    return value if value is not False else None

            comparator = functools.partial(
                ReversibleComparator,
                reverse=reverse,
                none_first=nulls_first,
            )
            return lambda rec: comparator(getter(rec))

        item_makers = [
            order_to_function(order_part)
            for order_part in order.split(',')
        ]
        return lambda rec: tuple(fn(rec) for fn in item_makers)
6344 @api.private
6345 def update(self, values: ValuesType) -> None:
6346 """ Update the records in ``self`` with ``values``. """
6347 for name, value in values.items():
6348 self[name] = value
6350 @api.private
6351 def flush_model(self, fnames: Collection[str] | None = None) -> None:
6352 """ Process the pending computations and database updates on ``self``'s
6353 model. When the parameter is given, the method guarantees that at least
6354 the given fields are flushed to the database. More fields can be
6355 flushed, though.
6357 :param fnames: optional iterable of field names to flush
6358 """
6359 self._recompute_model(fnames)
6360 dirty_fields = self.env._field_dirty
6361 if fnames is None or any(self._fields[fname] in dirty_fields for fname in fnames):
6362 self._flush()
6364 @api.private
6365 def flush_recordset(self, fnames: Collection[str] | None = None) -> None:
6366 """ Process the pending computations and database updates on the records
6367 ``self``. When the parameter is given, the method guarantees that at
6368 least the given fields on records ``self`` are flushed to the database.
6369 More fields and records can be flushed, though.
6371 :param fnames: optional iterable of field names to flush
6372 """
6373 if not self:
6374 return
6375 self._recompute_recordset(fnames)
6376 if fnames is None:
6377 fields = self._fields.values()
6378 else:
6379 fields = [self._fields[fname] for fname in fnames]
6380 ids = set(self._ids)
6381 dirty_fields = self.env._field_dirty
6382 if not all(ids.isdisjoint(dirty_fields.get(field, ())) for field in fields):
6383 self._flush()
    def _flush(self) -> None:
        """ Write all dirty (cache-modified) field values of this model to the
        database in batches, grouping records that share the same set of dirty
        fields so they can be written together.
        """
        # pop dirty fields and their corresponding record ids from cache
        dirty_fields = self.env._field_dirty
        dirty_field_ids = {
            field: ids
            for field in self._fields.values()
            if (ids := dirty_fields.pop(field, None))
        }
        if not dirty_field_ids:
            return

        # for context-dependent fields, `get_column_update` contains the
        # logic to find which value to flush
        model = self

        # sort dirty record ids so that records with the same set of modified
        # fields are grouped together; for that purpose, map each dirty id to
        # an integer that represents its subset of dirty fields (bitmask)
        dirty_ids = sorted(
            OrderedSet(id_ for ids in dirty_field_ids.values() for id_ in ids),
            key=lambda id_: sum(
                1 << field_index
                for field_index, ids in enumerate(dirty_field_ids.values())
                if id_ in ids
            ),
        )

        # perform updates in batches in order to limit memory footprint
        BATCH_SIZE = 1000
        for some_ids in split_every(BATCH_SIZE, dirty_ids):
            vals_list = []
            try:
                for id_ in some_ids:
                    record = model.browse((id_,))
                    # one vals dict per record, restricted to its dirty fields
                    vals_list.append({
                        f.name: f.get_column_update(record)
                        for f, ids in dirty_field_ids.items()
                        if id_ in ids
                    })
            except KeyError:
                # a dirty field had no cached value for the record being flushed
                raise AssertionError(
                    f"Could not find all values of {record} to flush them\n"
                    f" Context: {self.env.context}\n"
                    f" Cache: {self.env.cache!r}"
                )
            model.browse(some_ids)._write_multi(vals_list)
6432 #
6433 # New records - represent records that do not exist in the database yet;
6434 # they are used to perform onchanges.
6435 #
6437 @api.model
6438 @api.private
6439 def new(self, values: ValuesType | None = None, origin: Self | None = None, ref: str | None = None) -> Self:
6440 """ Return a new record instance attached to the current environment and
6441 initialized with the provided ``value``. The record is *not* created
6442 in database, it only exists in memory.
6444 One can pass an ``origin`` record, which is the actual record behind the
6445 result. It is retrieved as ``record._origin``. Two new records with the
6446 same origin record are considered equal.
6448 One can also pass a ``ref`` value to identify the record among other new
6449 records. The reference is encapsulated in the ``id`` of the record.
6450 """
6451 if values is None: 6451 ↛ 6452line 6451 didn't jump to line 6452 because the condition on line 6451 was never true
6452 values = {}
6453 if origin is not None: 6453 ↛ 6454line 6453 didn't jump to line 6454 because the condition on line 6453 was never true
6454 origin = origin.id
6455 record = self.browse((NewId(origin, ref),))
6456 record._update_cache(values, validate=False)
6458 return record
6460 @property
6461 def _origin(self) -> Self:
6462 """ Return the actual records corresponding to ``self``. """
6463 if all(self._ids):
6464 return self # already real records
6465 ids = tuple(OriginIds(self._ids))
6466 prefetch_ids = OriginIds(self._prefetch_ids)
6467 return self.__class__(self.env, ids, prefetch_ids)
6469 #
6470 # "Dunder" methods
6471 #
6473 def __bool__(self) -> bool:
6474 """ Test whether ``self`` is nonempty. """
6475 return True if self._ids else False # fast version of bool(self._ids)
    def __len__(self) -> int:
        """ Return the size of ``self``. """
        # length of the underlying ids tuple
        return len(self._ids)
6481 def __iter__(self) -> Iterator[Self]:
6482 """ Return an iterator over ``self``. """
6483 ids = self._ids
6484 size = len(ids)
6485 if size <= 1:
6486 # detect and handle small recordsets (single `1f`)
6487 # early return if no records and avoid allocation if we have a one
6488 if size == 1:
6489 yield self
6490 return
6491 cls = self.__class__
6492 env = self.env
6493 prefetch_ids = self._prefetch_ids
6494 if size > PREFETCH_MAX and prefetch_ids is ids:
6495 for sub_ids in split_every(PREFETCH_MAX, ids):
6496 for id_ in sub_ids:
6497 yield cls(env, (id_,), sub_ids)
6498 else:
6499 for id_ in ids:
6500 yield cls(env, (id_,), prefetch_ids)
6502 def __reversed__(self) -> Iterator[Self]:
6503 """ Return an reversed iterator over ``self``. """
6504 # same as __iter__ but reversed
6505 ids = self._ids
6506 size = len(ids)
6507 if size <= 1:
6508 if size == 1:
6509 yield self
6510 return
6511 cls = self.__class__
6512 env = self.env
6513 prefetch_ids = self._prefetch_ids
6514 if size > PREFETCH_MAX and prefetch_ids is ids: 6514 ↛ 6515line 6514 didn't jump to line 6515 because the condition on line 6514 was never true
6515 for sub_ids in split_every(PREFETCH_MAX, reversed(ids)):
6516 for id_ in sub_ids:
6517 yield cls(env, (id_,), sub_ids)
6518 else:
6519 prefetch_ids = ReversedIterable(prefetch_ids)
6520 for id_ in reversed(ids):
6521 yield cls(env, (id_,), prefetch_ids)
6523 def __contains__(self, item: BaseModel | str) -> bool:
6524 """ Test whether ``item`` (record or field name) is an element of ``self``.
6526 In the first case, the test is fully equivalent to::
6528 any(item == record for record in self)
6530 In the second case, we check whether the model has a field named
6531 ``item``.
6532 """
6533 try:
6534 if self._name == item._name: 6534 ↛ 6536line 6534 didn't jump to line 6536 because the condition on line 6534 was always true
6535 return len(item) == 1 and item.id in self._ids
6536 raise TypeError(f"inconsistent models in: {item} in {self}")
6537 except AttributeError:
6538 if isinstance(item, str): 6538 ↛ 6540line 6538 didn't jump to line 6540 because the condition on line 6538 was always true
6539 return item in self._fields
6540 raise TypeError(f"unsupported operand types in: {item!r} in {self}")
    def __add__(self, other) -> Self:
        """ Return the concatenation of two recordsets. """
        # delegates to concat(): order preserved, duplicates kept
        return self.concat(other)
6546 @api.private
6547 def concat(self, *args: Self) -> Self:
6548 """ Return the concatenation of ``self`` with all the arguments (in
6549 linear time complexity).
6550 """
6551 ids = list(self._ids)
6552 for arg in args:
6553 try:
6554 if arg._name != self._name: 6554 ↛ 6555line 6554 didn't jump to line 6555 because the condition on line 6554 was never true
6555 raise TypeError(f"inconsistent models in: {self} + {arg}")
6556 ids.extend(arg._ids)
6557 except AttributeError:
6558 raise TypeError(f"unsupported operand types in: {self} + {arg!r}")
6559 return self.browse(ids)
6561 def __sub__(self, other) -> Self:
6562 """ Return the recordset of all the records in ``self`` that are not in
6563 ``other``. Note that recordset order is preserved.
6564 """
6565 try:
6566 if self._name != other._name: 6566 ↛ 6567line 6566 didn't jump to line 6567 because the condition on line 6566 was never true
6567 raise TypeError(f"inconsistent models in: {self} - {other}")
6568 other_ids = set(other._ids)
6569 return self.browse(id_ for id_ in self._ids if id_ not in other_ids)
6570 except AttributeError:
6571 raise TypeError(f"unsupported operand types in: {self} - {other!r}")
6573 def __and__(self, other) -> Self:
6574 """ Return the intersection of two recordsets.
6575 Note that first occurrence order is preserved.
6576 """
6577 try:
6578 if self._name != other._name: 6578 ↛ 6579line 6578 didn't jump to line 6579 because the condition on line 6578 was never true
6579 raise TypeError(f"inconsistent models in: {self} & {other}")
6580 other_ids = set(other._ids)
6581 return self.browse(OrderedSet(id_ for id_ in self._ids if id_ in other_ids))
6582 except AttributeError:
6583 raise TypeError(f"unsupported operand types in: {self} & {other!r}")
    def __or__(self, other) -> Self:
        """ Return the union of two recordsets.
        Note that first occurrence order is preserved.
        """
        # delegates to union(), which also accepts multiple arguments
        return self.union(other)
6591 @api.private
6592 def union(self, *args: Self) -> Self:
6593 """ Return the union of ``self`` with all the arguments (in linear time
6594 complexity, with first occurrence order preserved).
6595 """
6596 ids = list(self._ids)
6597 for arg in args:
6598 try:
6599 if arg._name != self._name: 6599 ↛ 6600line 6599 didn't jump to line 6600 because the condition on line 6599 was never true
6600 raise TypeError(f"inconsistent models in: {self} | {arg}")
6601 ids.extend(arg._ids)
6602 except AttributeError:
6603 raise TypeError(f"unsupported operand types in: {self} | {arg!r}")
6604 return self.browse(OrderedSet(ids))
6606 def __eq__(self, other):
6607 """ Test whether two recordsets are equivalent (up to reordering). """
6608 try:
6609 return self._name == other._name and set(self._ids) == set(other._ids)
6610 except AttributeError:
6611 if other: 6611 ↛ 6612line 6611 didn't jump to line 6612 because the condition on line 6611 was never true
6612 warnings.warn(f"unsupported operand type(s) for \"==\": '{self._name}()' == '{other!r}'", stacklevel=2)
6613 return NotImplemented
6615 def __lt__(self, other):
6616 try:
6617 if self._name == other._name: 6617 ↛ 6621line 6617 didn't jump to line 6621 because the condition on line 6617 was always true
6618 return set(self._ids) < set(other._ids)
6619 except AttributeError:
6620 pass
6621 return NotImplemented
6623 def __le__(self, other):
6624 try:
6625 if self._name == other._name: 6625 ↛ 6634line 6625 didn't jump to line 6634 because the condition on line 6625 was always true
6626 # these are much cheaper checks than a proper subset check, so
6627 # optimise for checking if a null or singleton are subsets of a
6628 # recordset
6629 if not self or self in other: 6629 ↛ 6631line 6629 didn't jump to line 6631 because the condition on line 6629 was always true
6630 return True
6631 return set(self._ids) <= set(other._ids)
6632 except AttributeError:
6633 pass
6634 return NotImplemented
6636 def __gt__(self, other):
6637 try:
6638 if self._name == other._name:
6639 return set(self._ids) > set(other._ids)
6640 except AttributeError:
6641 pass
6642 return NotImplemented
6644 def __ge__(self, other):
6645 try:
6646 if self._name == other._name: 6646 ↛ 6652line 6646 didn't jump to line 6652 because the condition on line 6646 was always true
6647 if not other or other in self:
6648 return True
6649 return set(self._ids) >= set(other._ids)
6650 except AttributeError:
6651 pass
6652 return NotImplemented
    def __int__(self) -> int:
        # the single record's id, or 0 when ``self.id`` is falsy
        return self.id or 0
6657 def __repr__(self):
6658 return f"{self._name}{self._ids!r}"
6660 def __hash__(self):
6661 return hash((self._name, frozenset(self._ids)))
    def __deepcopy__(self, memo):
        # recordsets are not duplicated by deepcopy: the same instance is returned
        return self
6666 @typing.overload
6667 def __getitem__(self, key: int | slice) -> Self: ...
6669 @typing.overload
6670 def __getitem__(self, key: str) -> typing.Any: ...
6672 def __getitem__(self, key):
6673 """ If ``key`` is an integer or a slice, return the corresponding record
6674 selection as an instance (attached to ``self.env``).
6675 Otherwise read the field ``key`` of the first record in ``self``.
6677 Examples::
6679 inst = model.search(dom) # inst is a recordset
6680 r4 = inst[3] # fourth record in inst
6681 rs = inst[10:20] # subset of inst
6682 nm = rs['name'] # name of first record in inst
6683 """
6684 if isinstance(key, str):
6685 # important: one must call the field's getter
6686 return self._fields[key].__get__(self)
6687 elif isinstance(key, slice):
6688 return self.browse(self._ids[key])
6689 else:
6690 return self.browse((self._ids[key],))
6692 def __setitem__(self, key: str, value: typing.Any):
6693 """ Assign the field ``key`` to ``value`` in record ``self``. """
6694 # important: one must call the field's setter
6695 return self._fields[key].__set__(self, value)
6697 #
6698 # Cache and recomputation management
6699 #
    @property
    def _cache(self):
        """ Return the cache of ``self``, mapping field names to values. """
        # thin mapping view over the cache for this single record
        return RecordCache(self)
6706 @api.private
6707 def invalidate_model(self, fnames: Collection[str] | None = None, flush: bool = True) -> None:
6708 """ Invalidate the cache of all records of ``self``'s model, when the
6709 cached values no longer correspond to the database values. If the
6710 parameter is given, only the given fields are invalidated from cache.
6712 :param fnames: optional iterable of field names to invalidate
6713 :param flush: whether pending updates should be flushed before invalidation.
6714 It is ``True`` by default, which ensures cache consistency.
6715 Do not use this parameter unless you know what you are doing.
6716 """
6717 if flush: 6717 ↛ 6719line 6717 didn't jump to line 6719 because the condition on line 6717 was always true
6718 self.flush_model(fnames)
6719 self._invalidate_cache(fnames)
6721 @api.private
6722 def invalidate_recordset(self, fnames: Collection[str] | None = None, flush: bool = True) -> None:
6723 """ Invalidate the cache of the records in ``self``, when the cached
6724 values no longer correspond to the database values. If the parameter
6725 is given, only the given fields on ``self`` are invalidated from cache.
6727 :param fnames: optional iterable of field names to invalidate
6728 :param flush: whether pending updates should be flushed before invalidation.
6729 It is ``True`` by default, which ensures cache consistency.
6730 Do not use this parameter unless you know what you are doing.
6731 """
6732 if flush:
6733 self.flush_recordset(fnames)
6734 self._invalidate_cache(fnames, self._ids)
6736 def _invalidate_cache(self, fnames: Collection[str] | None = None, ids: Sequence[IdType] | None = None) -> None:
6737 if ids is not None and not ids: # Avoid invalidating field_inverses for no reason
6738 return
6740 if fnames is None:
6741 fields = self._fields.values()
6742 else:
6743 fields = [self._fields[fname] for fname in fnames]
6745 env = self.env
6746 for field in fields:
6747 field._invalidate_cache(env, ids)
6748 # TODO VSC: used to remove the inverse of many_to_one from the cache, though we might not need it anymore
6749 for invf in self.pool.field_inverses[field]:
6750 self.env[invf.model_name].flush_model([invf.name])
6751 invf._invalidate_cache(env)
    @api.private
    def modified(self, fnames: Collection[str], create: bool = False, before: bool = False) -> None:
        """ Notify that fields will be or have been modified on ``self``. This
        invalidates the cache where necessary, and prepares the recomputation of
        dependent stored fields.

        :param fnames: iterable of field names modified on records ``self``
        :param create: whether called in the context of record creation
        :param before: whether called before modifying records ``self``
        """
        if not self or not fnames:
            return

        # The triggers of a field F is a tree that contains the fields that
        # depend on F, together with the fields to inverse to find out which
        # records to recompute.
        #
        # For instance, assume that G depends on F, H depends on X.F, I depends
        # on W.X.F, and J depends on Y.F. The triggers of F will be the tree:
        #
        #          [G]
        #        X/   \Y
        #      [H]     [J]
        #    W/
        #  [I]
        #
        # This tree provides perfect support for the trigger mechanism:
        # when F is modified on records,
        # - mark G to recompute on records,
        # - mark H to recompute on inverse(X, records),
        # - mark I to recompute on inverse(W, inverse(X, records)),
        # - mark J to recompute on inverse(Y, records).

        if before:
            # When called before modification, we should determine what
            # currently depends on self, and it should not be recomputed before
            # the modification. So we only collect what should be marked for
            # recomputation.
            marked = self.env.transaction.tocompute  # {field: ids}
            tomark = defaultdict(OrderedSet)  # {field: ids}
        else:
            # When called after modification, one should traverse backwards
            # dependencies by taking into account all fields already known to
            # be recomputed. In that case, we mark fields to compute as soon as
            # possible.
            marked = {}
            tomark = self.env.transaction.tocompute

        # determine what to trigger (with iterators)
        todo = [self._modified([self._fields[fname] for fname in fnames], create)]

        # process what to trigger by lazily chaining todo; each item is an
        # iterator of (field, records, create) triples
        for field, records, create in itertools.chain.from_iterable(todo):
            records -= self.env.protected(field)
            if not records:
                continue

            if field.recursive:
                # discard already processed records, in order to avoid cycles
                if field.compute and field.store:
                    ids = (marked.get(field) or set()) | (tomark.get(field) or set())
                    records = records.browse(id_ for id_ in records._ids if id_ not in ids)
                else:
                    # get only records that have a value in cache (in any context)
                    ids_in_cache = field._get_all_cache_ids(self.env)
                    records = records.browse(id_ for id_ in records._ids if id_ in ids_in_cache)
                if not records:
                    continue
                # recursively trigger recomputation of field's dependents
                todo.append(records._modified([field], create))

            # mark for recomputation (now or later, depending on 'before')
            if field.compute and field.store:
                tomark[field].update(records._ids)
            else:
                # Don't force the recomputation of compute fields which are
                # not stored as this is not really necessary.
                field._invalidate_cache(self.env, records._ids)

        if before:
            # effectively mark for recomputation now
            for field, ids in tomark.items():
                records = self.env[field.model_name].browse(ids)
                self.env.add_to_compute(field, records)
6838 def _modified(self, fields: list[Field], create: bool) -> Iterable[tuple[Field, BaseModel, bool]]:
6839 """ Return an iterator traversing a tree of field triggers on ``self``,
6840 traversing backwards field dependencies along the way, and yielding
6841 tuple ``(field, records, created)`` to recompute.
6842 """
6844 # The fields' trigger trees are merged in order to evaluate all triggers
6845 # at once. For non-stored computed fields, `_modified_triggers` might
6846 # traverse the tree (at the cost of extra queries) only to know which
6847 # records to invalidate in cache. But in many cases, most of these
6848 # fields have no data in cache, so they can be ignored from the start.
6849 # This allows us to discard subtrees from the merged tree when they
6850 # only contain such fields.
6851 def select(field):
6852 return (field.compute and field.store) or bool(field._get_all_cache_ids(self.env))
6854 tree = self.pool.get_trigger_tree(fields, select=select)
6855 if not tree:
6856 return ()
6858 return self.sudo().with_context(active_test=False)._modified_triggers(tree, create)
    def _modified_triggers(self, tree: TriggerTree, create: bool = False) -> Iterable[tuple[Field, BaseModel, bool]]:
        """ Return an iterator traversing a tree of field triggers on ``self``,
        traversing backwards field dependencies along the way, and yielding
        tuple ``(field, records, created)`` to recompute.
        """
        if not self:
            return

        # first yield what to compute
        for field in tree.root:
            yield field, self, create

        # then traverse dependencies backwards, and proceed recursively
        for field, subtree in tree.items():
            if create and field.type in ('many2one', 'many2one_reference'):
                # upon creation, no other record has a reference to self
                continue

            # subtree is another tree of dependencies
            model = self.env[field.model_name]
            for invf in model.pool.field_inverses[field]:
                # use an inverse of field without domain
                if not (invf.type in ('one2many', 'many2many') and invf.domain):
                    if invf.type == 'many2one_reference':
                        # collect referenced ids, restricted to rows whose
                        # model_field actually points to this model
                        rec_ids = OrderedSet()
                        for rec in self:
                            try:
                                if rec[invf.model_field] == field.model_name:
                                    rec_ids.add(rec[invf.name])
                            except MissingError:
                                continue
                        records = model.browse(rec_ids)
                    else:
                        try:
                            records = self[invf.name]
                        except MissingError:
                            # retry on the records that still exist
                            records = self.exists()[invf.name]

                    # TODO: find a better fix
                    if field.model_name == records._name:
                        if not any(self._ids):
                            # if self are new, records should be new as well
                            records = records.browse(it and NewId(it) for it in records._ids)
                        break
            else:
                # no usable inverse field: search the database for real
                # records, and scan the cache for references to new records
                new_records = self.filtered(lambda r: not r.id)
                real_records = self - new_records
                records = model.browse()
                if real_records:
                    records = model.search([(field.name, 'in', real_records.ids)], order='id')
                if new_records:
                    field_cache = field._get_cache(model.env)
                    cache_records = model.browse(field_cache)
                    new_ids = set(self._ids)
                    records |= cache_records.filtered(lambda r: not set(r[field.name]._ids).isdisjoint(new_ids))

            yield from records._modified_triggers(subtree)
6918 def _recompute_model(self, fnames: Collection[str] | None = None) -> None:
6919 """ Process the pending computations of the fields of ``self``'s model.
6921 :param fnames: optional iterable of field names to compute
6922 """
6923 if fnames is None:
6924 fields = self._fields.values()
6925 else:
6926 fields = [self._fields[fname] for fname in fnames]
6928 for field in fields:
6929 if field.compute and field.store:
6930 self._recompute_field(field)
6932 def _recompute_recordset(self, fnames: Collection[str] | None = None) -> None:
6933 """ Process the pending computations of the fields of the records in ``self``.
6935 :param fnames: optional iterable of field names to compute
6936 """
6937 if fnames is None:
6938 fields = self._fields.values()
6939 else:
6940 fields = [self._fields[fname] for fname in fnames]
6942 for field in fields:
6943 if field.compute and field.store:
6944 self._recompute_field(field, self._ids)
6946 def _recompute_field(self, field: Field, ids: Sequence[IdType] | None = None) -> None:
6947 ids_to_compute = self.env.transaction.tocompute.get(field, ())
6948 if ids is None:
6949 ids = ids_to_compute
6950 else:
6951 ids = [id_ for id_ in ids if id_ in ids_to_compute]
6952 if not ids:
6953 return
6955 # do not force recomputation on new records; those will be
6956 # recomputed by accessing the field on the records
6957 records = self.browse(tuple(id_ for id_ in ids if id_))
6958 field.recompute(records)
6960 #
6961 # Generic onchange method
6962 #
6964 def _has_onchange(self, field: Field, other_fields: Collection[Field]) -> bool:
6965 """ Return whether ``field`` should trigger an onchange event in the
6966 presence of ``other_fields``.
6967 """
6968 return (field.name in self._onchange_methods) or any(
6969 dep in other_fields
6970 for dep in self.pool.get_dependent_fields(field.base_field)
6971 )
6973 def _apply_onchange_methods(self, field_name: str, result: dict) -> None:
6974 """ Apply onchange method(s) for field ``field_name`` on ``self``. Value
6975 assignments are applied on ``self``, while warning messages are put
6976 in dictionary ``result``.
6977 """
6978 for method in self._onchange_methods.get(field_name, ()):
6979 res = method(self)
6980 if not res:
6981 continue
6982 if res.get('value'):
6983 for key, val in res['value'].items():
6984 if key in self._fields and key != 'id':
6985 self[key] = val
6986 if res.get('warning'):
6987 result['warnings'].add((
6988 res['warning'].get('title') or _("Warning"),
6989 res['warning'].get('message') or "",
6990 res['warning'].get('type') or "",
6991 ))
6993 def onchange(self, values: dict, field_names: list[str], fields_spec: dict) -> dict:
6994 raise NotImplementedError("onchange() is implemented in module 'web'")
6996 def _get_placeholder_filename(self, field: str) -> bool:
6997 """ Returns the filename of the placeholder to use,
6998 set on web/static/img by default, or the
6999 complete path to access it (eg: module/path/to/image.png).
7000 """
7001 return False
# Register recordsets as virtual subclasses of the collection ABCs, so that
# isinstance checks against Set/Sequence accept them.
collections.abc.Set.register(BaseModel)
# not exactly true as BaseModel doesn't have index or count
collections.abc.Sequence.register(BaseModel)
class RecordCache(Mapping[str, typing.Any]):
    """ A read-only mapping from field names to cached values, giving access
    to the cache of a single record.
    """
    __slots__ = ['_record']

    def __init__(self, record: BaseModel):
        assert len(record) == 1, "Unexpected RecordCache(%s)" % record
        self._record = record

    def __contains__(self, name):
        """ Return whether the record has a cached value for field ``name``. """
        rec = self._record
        return rec.id in rec._fields[name]._get_cache(rec.env)

    def __getitem__(self, name):
        """ Return the cached value of field ``name`` for the record. """
        rec = self._record
        return rec._fields[name]._get_cache(rec.env)[rec.id]

    def __iter__(self):
        """ Iterate over the names of the fields with a cached value. """
        rec = self._record
        rid, env = rec.id, rec.env
        return (
            name
            for name, field in rec._fields.items()
            if rid in field._get_cache(env)
        )

    def __len__(self):
        """ Return the number of fields with a cached value. """
        return sum(1 for _name in self)
# Public alias: abstract models are plain BaseModel subclasses.
AbstractModel = BaseModel
class Model(AbstractModel):
    """ Main super-class for regular, database-persisted Odoo models.

    Concrete models are defined by inheriting from this class::

        class ResUsers(Model):
            ...

    The system later instantiates the class once per database on which
    the class' module is installed.
    """
    _auto: bool = True                        # automatically create database backend
    _register: bool = False                   # not visible in ORM registry, meant to be python-inherited only
    _abstract: typing.Literal[False] = False  # not abstract
7062@functools.total_ordering
7063class ReversibleComparator:
7064 __slots__ = ('__item', '__none_first', '__reverse')
7066 def __init__(self, item, reverse: bool, none_first: bool):
7067 self.__item = item
7068 self.__reverse = reverse
7069 self.__none_first = none_first
7071 def __lt__(self, other: ReversibleComparator) -> bool:
7072 item = self.__item
7073 item_cmp = other.__item
7074 if item == item_cmp: 7074 ↛ 7075line 7074 didn't jump to line 7075 because the condition on line 7074 was never true
7075 return False
7076 if item is None: 7076 ↛ 7077line 7076 didn't jump to line 7077 because the condition on line 7076 was never true
7077 return self.__none_first
7078 if item_cmp is None: 7078 ↛ 7079line 7078 didn't jump to line 7079 because the condition on line 7078 was never true
7079 return not self.__none_first
7080 if self.__reverse: 7080 ↛ 7081line 7080 didn't jump to line 7081 because the condition on line 7080 was never true
7081 item, item_cmp = item_cmp, item
7082 return item < item_cmp
7084 def __eq__(self, other: ReversibleComparator) -> bool:
7085 return self.__item == other.__item
7087 def __hash__(self):
7088 return hash(self.__item)
7090 def __repr__(self):
7091 return f"<ReversibleComparator {self.__item!r}{' reverse' if self.__reverse else ''}>"
def itemgetter_tuple(items):
    """ Like :func:`operator.itemgetter`, but always return an n-tuple with
    n = len(items). This fixes itemgetter's inconsistency of returning a bare
    value (not a 1-tuple) when given a single item.
    """
    count = len(items)
    if count == 0:
        return lambda _gettable: ()
    if count == 1:
        return lambda gettable: (gettable[items[0]],)
    return itemgetter(*items)
def get_columns_from_sql_diagnostics(cr, diagnostics, *, check_registry=False) -> list[str]:
    """Given the diagnostics of a database error, return the column names
    affected by the violated constraint. Return an empty list if the columns
    cannot be determined.

    :param cr: database cursor used for catalog lookups
    :param diagnostics: psycopg2 ``Diagnostics`` object from the error
    :param check_registry: when true, fall back to querying the PostgreSQL
        catalog for the constraint's columns
    """
    # fast path: the server already reported the offending column
    direct_column = diagnostics.column_name
    if direct_column:
        return [direct_column]
    if not check_registry:
        return []
    # otherwise, resolve the constraint's columns from the system catalog
    cr.execute(SQL("""
        SELECT
            ARRAY(
                SELECT attname FROM pg_attribute
                WHERE attrelid = conrelid
                AND attnum = ANY(conkey)
            ) as "columns"
        FROM pg_constraint
        JOIN pg_class t ON t.oid = conrelid
        WHERE conname = %s
        AND t.relname = %s
        AND t.relnamespace = current_schema::regnamespace
    """, diagnostics.constraint_name, diagnostics.table_name))
    row = cr.fetchone()
    return row[0] if row else []