Coverage for adhoc-cicd-odoo-odoo / odoo / _monkeypatches / werkzeug.py: 34%

472 statements  

« prev     ^ index     » next       coverage.py v7.13.4, created at 2026-03-09 18:15 +0000

1# ruff: noqa: PLC0415 (import in function not at top-level) 

2from __future__ import annotations 

3 

4import contextlib 

5import operator 

6import os 

7import re 

8import sys 

9import typing as t 

10import warnings 

11from shutil import copyfileobj 

12from types import CodeType 

13 

14from werkzeug import urls 

15from werkzeug.datastructures import FileStorage, MultiDict 

16from werkzeug.routing import Rule 

17from werkzeug.urls import _decode_idna 

18from werkzeug.wrappers import Request, Response 

19 

# Save a reference to Rule._get_func_code before any patching happens;
# evaluates to False on werkzeug versions where the hook does not exist.
# NOTE(review): presumably the rest of this module replaces the attribute
# and restores/uses this original — confirm against the full file.
Rule_get_func_code = hasattr(Rule, '_get_func_code') and Rule._get_func_code

21 

22 

23def _check_str_tuple(value: t.Tuple[t.AnyStr, ...]) -> None: 

24 """Ensure tuple items are all strings or all bytes.""" 

25 if not value: 25 ↛ 26line 25 didn't jump to line 26 because the condition on line 25 was never true

26 return 

27 

28 item_type = str if isinstance(value[0], str) else bytes 

29 

30 if any(not isinstance(item, item_type) for item in value): 30 ↛ 31line 30 didn't jump to line 31 because the condition on line 30 was never true

31 raise TypeError(f"Cannot mix str and bytes arguments (got {value!r})") 

32 

33 

34def _make_encode_wrapper(reference: t.AnyStr) -> t.Callable[[str], t.AnyStr]: 

35 """Create a function that will be called with a string argument. If 

36 the reference is bytes, values will be encoded to bytes. 

37 """ 

38 if isinstance(reference, str): 38 ↛ 41line 38 didn't jump to line 41 because the condition on line 38 was always true

39 return lambda x: x 

40 

41 return operator.methodcaller("encode", "latin1") 

42 

43 

44_default_encoding = sys.getdefaultencoding() 

45 

46 

47def _to_str( 

48 x: t.Optional[t.Any], 

49 charset: t.Optional[str] = _default_encoding, 

50 errors: str = "strict", 

51 allow_none_charset: bool = False, 

52): 

53 if x is None or isinstance(x, str): 53 ↛ 56line 53 didn't jump to line 56 because the condition on line 53 was always true

54 return x 

55 

56 if not isinstance(x, (bytes, bytearray)): 

57 return str(x) 

58 

59 if charset is None: 

60 if allow_none_charset: 

61 return x 

62 

63 return x.decode(charset, errors) # type: ignore 

64 

65 

66if t.TYPE_CHECKING: 66 ↛ 67line 66 didn't jump to line 67 because the condition on line 66 was never true

67 from werkzeug import datastructures as ds 

68 

# A regular expression for what a valid schema looks like
# NOTE(review): inside the character class, "+-." is a range from "+" to
# "." and therefore also matches "," — presumably "+\-." was intended, but
# this mirrors the upstream werkzeug pattern, so it is left untouched.
_scheme_re = re.compile(r"^[a-zA-Z0-9+-.]+$")

# Characters that are safe in any part of an URL.
# (The RFC 3986 "unreserved" set: ALPHA / DIGIT / "-" / "." / "_" / "~".)
_always_safe_chars = (
    "abcdefghijklmnopqrstuvwxyz"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "0123456789"
    "-._~"
)
_always_safe = frozenset(_always_safe_chars.encode("ascii"))

# Percent-escape lookup tables:
# _hextobyte maps every two-hex-digit byte pair (upper and lower case)
# to its integer value; _bytetohex maps a byte to its b"%XX" escape.
_hexdigits = "0123456789ABCDEFabcdef"
_hextobyte = {
    f"{a}{b}".encode("ascii"): int(f"{a}{b}", 16)
    for a in _hexdigits
    for b in _hexdigits
}
_bytetohex = [f"%{char:02X}".encode("ascii") for char in range(256)]

88 

89 

class _URLTuple(t.NamedTuple):
    """Named 5-tuple holding the components of a split URL.

    Field order matches :func:`urllib.parse.urlsplit`:
    ``(scheme, netloc, path, query, fragment)``.
    """

    scheme: str
    netloc: str
    path: str
    query: str
    fragment: str

96 

97 

class BaseURL(_URLTuple):
    """Superclass of :py:class:`URL` and :py:class:`BytesURL`.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use the ``urllib.parse`` library instead.
    """

    __slots__ = ()
    # Separator literals; subclasses provide str ("@") or bytes (b"@")
    # variants so the _split_* helpers below work on either text type.
    _at: str
    _colon: str
    _lbracket: str
    _rbracket: str

    def __new__(cls, *args: t.Any, **kwargs: t.Any) -> BaseURL:  # noqa: PYI034
        """Create the tuple; overridden only to fix the return annotation."""
        return super().__new__(cls, *args, **kwargs)

    def __str__(self) -> str:
        """Render the URL via :meth:`to_url`."""
        return self.to_url()

    def replace(self, **kwargs: t.Any) -> BaseURL:
        """Return an URL with the same values, except for those parameters
        given new values by whichever keyword arguments are specified."""
        return self._replace(**kwargs)

    @property
    def host(self) -> str | None:
        """The host part of the URL if available, otherwise `None`. The
        host is either the hostname or the IP address mentioned in the
        URL. It will not contain the port.
        """
        return self._split_host()[0]

    @property
    def ascii_host(self) -> str | None:
        """Works exactly like :attr:`host` but will return a result that
        is restricted to ASCII. If it finds a netloc that is not ASCII
        it will attempt to idna decode it. This is useful for socket
        operations when the URL might include internationalized characters.
        """
        rv = self.host
        if rv is not None and isinstance(rv, str):
            # Hosts that cannot be IDNA-encoded are returned unchanged.
            with contextlib.suppress(UnicodeError):
                rv = rv.encode("idna").decode("ascii")
        return rv

    @property
    def port(self) -> int | None:
        """The port in the URL as an integer if it was present, `None`
        otherwise. This does not fill in default ports.
        """
        try:
            rv = int(_to_str(self._split_host()[1]))
            if 0 <= rv <= 65535:
                return rv
        except (ValueError, TypeError):
            # Absent (TypeError from int(None)) or non-numeric port.
            pass
        return None

    @property
    def auth(self) -> str | None:
        """The authentication part in the URL if available, `None`
        otherwise.
        """
        return self._split_netloc()[0]

    @property
    def username(self) -> str | None:
        """The username if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a string.
        """
        rv = self._split_auth()[0]
        if rv is not None:
            return _url_unquote_legacy(rv)
        return None

    @property
    def raw_username(self) -> str | None:
        """The username if it was part of the URL, `None` otherwise.
        Unlike :attr:`username` this one is not being decoded.
        """
        return self._split_auth()[0]

    @property
    def password(self) -> str | None:
        """The password if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a string.
        """
        rv = self._split_auth()[1]
        if rv is not None:
            return _url_unquote_legacy(rv)
        return None

    @property
    def raw_password(self) -> str | None:
        """The password if it was part of the URL, `None` otherwise.
        Unlike :attr:`password` this one is not being decoded.
        """
        return self._split_auth()[1]

    def decode_query(self, *args: t.Any, **kwargs: t.Any) -> ds.MultiDict[str, str]:
        """Decodes the query part of the URL. Ths is a shortcut for
        calling :func:`url_decode` on the query argument. The arguments and
        keyword arguments are forwarded to :func:`url_decode` unchanged.
        """
        return url_decode(self.query, *args, **kwargs)

    def join(self, *args: t.Any, **kwargs: t.Any) -> BaseURL:
        """Joins this URL with another one. This is just a convenience
        function for calling into :meth:`url_join` and then parsing the
        return value again.
        """
        return url_parse(url_join(self, *args, **kwargs))

    def to_url(self) -> str:
        """Returns a URL string or bytes depending on the type of the
        information stored. This is just a convenience function
        for calling :meth:`url_unparse` for this URL.
        """
        return url_unparse(self)

    def encode_netloc(self) -> str:
        """Encodes the netloc part to an ASCII safe URL as bytes."""
        rv = self.ascii_host or ""
        if ":" in rv:
            # IPv6 literals must be bracketed inside the netloc.
            rv = f"[{rv}]"
        port = self.port
        if port is not None:
            rv = f"{rv}:{port}"
        auth = ":".join(
            filter(
                None,
                [
                    url_quote(self.raw_username or "", "utf-8", "strict", "/:%"),
                    url_quote(self.raw_password or "", "utf-8", "strict", "/:%"),
                ],
            )
        )
        if auth:
            rv = f"{auth}@{rv}"
        return rv

    def decode_netloc(self) -> str:
        """Decodes the netloc part into a string."""
        host = self.host or ""

        if isinstance(host, bytes):
            host = host.decode()

        # Reverse any IDNA ("xn--") encoding of the hostname.
        rv = _decode_idna(host)

        if ":" in rv:
            rv = f"[{rv}]"
        port = self.port
        if port is not None:
            rv = f"{rv}:{port}"
        auth = ":".join(
            filter(
                None,
                [
                    _url_unquote_legacy(self.raw_username or "", "/:%@"),
                    _url_unquote_legacy(self.raw_password or "", "/:%@"),
                ],
            )
        )
        if auth:
            rv = f"{auth}@{rv}"
        return rv

    def get_file_location(
        self, pathformat: str | None = None
    ) -> tuple[str | None, str | None]:
        """Returns a tuple with the location of the file in the form
        ``(server, location)``. If the netloc is empty in the URL or
        points to localhost, it's represented as ``None``.

        The `pathformat` by default is autodetection but needs to be set
        when working with URLs of a specific system. The supported values
        are ``'windows'`` when working with Windows or DOS paths and
        ``'posix'`` when working with posix paths.

        If the URL does not point to a local file, the server and location
        are both represented as ``None``.

        :param pathformat: The expected format of the path component.
                           Currently ``'windows'`` and ``'posix'`` are
                           supported. Defaults to ``None`` which is
                           autodetect.
        """
        if self.scheme != "file":
            return None, None

        path = url_unquote(self.path)
        host = self.netloc or None

        if pathformat is None:
            # Autodetect the path flavour from the running OS.
            if os.name == "nt":
                pathformat = "windows"
            else:
                pathformat = "posix"

        if pathformat == "windows":
            if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:":
                # "/C:/..." or "/C|/..." -> "C:/..."
                path = f"{path[1:2]}:{path[3:]}"
            windows_share = path[:3] in ("\\" * 3, "/" * 3)
            import ntpath

            path = ntpath.normpath(path)
            # Windows shared drives are represented as ``\\host\\directory``.
            # That results in a URL like ``file://///host/directory``, and a
            # path like ``///host/directory``. We need to special-case this
            # because the path contains the hostname.
            if windows_share and host is None:
                parts = path.lstrip("\\").split("\\", 1)
                if len(parts) == 2:
                    host, path = parts
                else:
                    host = parts[0]
                    path = ""
        elif pathformat == "posix":
            import posixpath

            path = posixpath.normpath(path)
        else:
            raise TypeError(f"Invalid path format {pathformat!r}")

        if host in ("127.0.0.1", "::1", "localhost"):
            # Loopback addresses are normalized to "no host".
            host = None

        return host, path

    def _split_netloc(self) -> tuple[str | None, str]:
        """Split the netloc into ``(auth, host_and_port)``; auth may be None."""
        if self._at in self.netloc:
            auth, _, netloc = self.netloc.partition(self._at)
            return auth, netloc
        return None, self.netloc

    def _split_auth(self) -> tuple[str | None, str | None]:
        """Split the auth part into ``(username, password)``."""
        auth = self._split_netloc()[0]
        if not auth:
            return None, None
        if self._colon not in auth:
            return auth, None

        username, _, password = auth.partition(self._colon)
        return username, password

    def _split_host(self) -> tuple[str | None, str | None]:
        """Split host-and-port into ``(host, port)``, honoring ``[...]``
        bracketed IPv6 literals."""
        rv = self._split_netloc()[1]
        if not rv:
            return None, None

        if not rv.startswith(self._lbracket):
            if self._colon in rv:
                host, _, port = rv.partition(self._colon)
                return host, port
            return rv, None

        # Bracketed form: host is everything inside "[...]".
        idx = rv.find(self._rbracket)
        if idx < 0:
            return rv, None

        host = rv[1:idx]
        rest = rv[idx + 1 :]
        if rest.startswith(self._colon):
            return host, rest[1:]
        return host, None

364 

365 

class URL(BaseURL):
    """Represents a parsed URL. This behaves like a regular tuple but
    also has some extra attributes that give further insight into the
    URL.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use the ``urllib.parse`` library instead.
    """

    __slots__ = ()
    # str separators consumed by the BaseURL split helpers.
    _at = "@"
    _colon = ":"
    _lbracket = "["
    _rbracket = "]"

    def encode(self, charset: str = "utf-8", errors: str = "replace") -> BytesURL:
        """Encodes the URL to a tuple made out of bytes. The charset is
        only being used for the path, query and fragment.
        """
        with warnings.catch_warnings():
            # NOTE(review): presumably constructing the deprecated URL types
            # emits a werkzeug DeprecationWarning; suppressed here — confirm.
            warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
            return BytesURL(
                self.scheme.encode("ascii"),
                self.encode_netloc(),
                self.path.encode(charset, errors),
                self.query.encode(charset, errors),
                self.fragment.encode(charset, errors),
            )

394 

395 

class BytesURL(BaseURL):
    """Represents a parsed URL in bytes.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use the ``urllib.parse`` library instead.
    """

    __slots__ = ()
    # bytes separators consumed by the BaseURL split helpers.
    _at = b"@"  # type: ignore
    _colon = b":"  # type: ignore
    _lbracket = b"["  # type: ignore
    _rbracket = b"]"  # type: ignore

    def __str__(self) -> str:
        # Undecodable bytes are replaced rather than raising.
        return self.to_url().decode("utf-8", "replace")  # type: ignore

    def encode_netloc(self) -> bytes:  # type: ignore
        """Returns the netloc unchanged as bytes."""
        return self.netloc  # type: ignore

    def decode(self, charset: str = "utf-8", errors: str = "replace") -> URL:
        """Decodes the URL to a tuple made out of strings. The charset is
        only being used for the path, query and fragment.
        """
        with warnings.catch_warnings():
            # NOTE(review): presumably constructing the deprecated URL types
            # emits a werkzeug DeprecationWarning; suppressed here — confirm.
            warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
            return URL(
                self.scheme.decode("ascii"),  # type: ignore
                self.decode_netloc(),
                self.path.decode(charset, errors),  # type: ignore
                self.query.decode(charset, errors),  # type: ignore
                self.fragment.decode(charset, errors),  # type: ignore
            )

429 

430 

# Cache of percent-decode tables keyed by the set of "unsafe" byte values
# that must stay escaped; the empty set maps to the full _hextobyte table.
_unquote_maps: dict[frozenset[int], dict[bytes, int]] = {frozenset(): _hextobyte}

432 

433 

def _unquote_to_bytes(string: str | bytes, unsafe: str | bytes = "") -> bytes:
    """Percent-decode *string* into raw bytes.

    Escapes that decode to a byte listed in *unsafe* are left verbatim,
    as is any ``%`` that is not followed by two hex digits.
    """
    data = string.encode("utf-8") if isinstance(string, str) else string
    unsafe_bytes = unsafe.encode("utf-8") if isinstance(unsafe, str) else unsafe
    unsafe_set = frozenset(bytearray(unsafe_bytes))

    # One decode table per unsafe-set, cached so repeated calls stay cheap.
    hex_to_byte = _unquote_maps.get(unsafe_set)
    if hex_to_byte is None:
        hex_to_byte = _unquote_maps[unsafe_set] = {
            h: b for h, b in _hextobyte.items() if b not in unsafe_set
        }

    pieces = iter(data.split(b"%"))
    out = bytearray(next(pieces, b""))

    for piece in pieces:
        decoded = hex_to_byte.get(piece[:2])

        if decoded is not None:
            out.append(decoded)
            out.extend(piece[2:])
        else:
            # Not a decodable escape: keep the "%" and the chunk literally.
            out.append(0x25)  # %
            out.extend(piece)

    return bytes(out)

463 

464 

def _url_encode_impl(
    obj: t.Mapping[str, str] | t.Iterable[tuple[str, str]],
    charset: str,
    sort: bool,
    key: t.Callable[[tuple[str, str]], t.Any] | None,
) -> t.Iterator[str]:
    """Yield quoted ``key=value`` strings for :func:`url_encode`.

    Pairs whose value is ``None`` are skipped.  Keys and values that are
    not already bytes are stringified and encoded to *charset* before
    being percent/plus quoted.
    """
    from werkzeug.datastructures import iter_multi_items

    pairs: t.Iterable[tuple[str, str]] = iter_multi_items(obj)

    if sort:
        pairs = sorted(pairs, key=key)

    for k, v in pairs:
        if v is None:
            continue

        k_bytes = k if isinstance(k, bytes) else str(k).encode(charset)
        v_bytes = v if isinstance(v, bytes) else str(v).encode(charset)

        yield f"{_fast_url_quote_plus(k_bytes)}={_fast_url_quote_plus(v_bytes)}"

493 

494 

def _url_unquote_legacy(value: str, unsafe: str = "") -> str:
    """Unquote *value* as strict UTF-8, falling back to latin-1.

    latin-1 maps every byte value, so the fallback cannot raise.
    """
    try:
        return url_unquote(value, charset="utf-8", errors="strict", unsafe=unsafe)
    except UnicodeError:
        return url_unquote(value, charset="latin1", unsafe=unsafe)

500 

501 

def url_parse(
    url: str, scheme: str | None = None, allow_fragments: bool = True
) -> BaseURL:
    """Parses a URL from a string into a :class:`URL` tuple. If the URL
    is lacking a scheme it can be provided as second argument. Otherwise,
    it is ignored. Optionally fragments can be stripped from the URL
    by setting `allow_fragments` to `False`.

    The inverse of this function is :func:`url_unparse`.

    :param url: the URL to parse.
    :param scheme: the default schema to use if the URL is schemaless.
    :param allow_fragments: if set to `False` a fragment will be removed
                            from the URL.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.urlsplit`` instead.
    """
    # s() adapts literals to the input's type so str and bytes URLs share
    # one code path.
    s = _make_encode_wrapper(url)
    is_text_based = isinstance(url, str)

    if scheme is None:
        scheme = s("")
    netloc = query = fragment = s("")
    i = url.find(s(":"))
    if i > 0 and _scheme_re.match(_to_str(url[:i], errors="replace")):
        # make sure "iri" is not actually a port number (in which case
        # "scheme" is really part of the path)
        rest = url[i + 1 :]
        if not rest or any(c not in s("0123456789") for c in rest):
            # not a port number
            scheme, url = url[:i].lower(), rest

    if url[:2] == s("//"):
        # The netloc ends at the first "/", "?" or "#", whichever comes
        # first after the leading "//".
        delim = len(url)
        for c in s("/?#"):
            wdelim = url.find(c, 2)
            if wdelim >= 0:
                delim = min(delim, wdelim)
        netloc, url = url[2:delim], url[delim:]
        if (s("[") in netloc and s("]") not in netloc) or (
            s("]") in netloc and s("[") not in netloc
        ):
            raise ValueError("Invalid IPv6 URL")

    if allow_fragments and s("#") in url:
        url, fragment = url.split(s("#"), 1)
    if s("?") in url:
        url, query = url.split(s("?"), 1)

    result_type = URL if is_text_based else BytesURL

    with warnings.catch_warnings():
        # NOTE(review): presumably constructing the deprecated URL types
        # emits a werkzeug DeprecationWarning; suppressed here — confirm.
        warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
        return result_type(scheme, netloc, url, query, fragment)

557 

558 

def _make_fast_url_quote(
    charset: str = "utf-8",
    errors: str = "strict",
    safe: str | bytes = "/:",
    unsafe: str | bytes = "",
) -> t.Callable[[bytes], str]:
    """Precompile the translation table for a URL encoding function.

    Unlike :func:`url_quote`, the generated function only takes the
    string to quote.

    :param charset: The charset to encode the result with.
    :param errors: How to handle encoding errors.
    :param safe: An optional sequence of safe characters to never encode.
    :param unsafe: An optional sequence of unsafe characters to always encode.
    """
    safe_bytes = safe.encode(charset, errors) if isinstance(safe, str) else safe
    unsafe_bytes = (
        unsafe.encode(charset, errors) if isinstance(unsafe, str) else unsafe
    )

    keep = (frozenset(bytearray(safe_bytes)) | _always_safe) - frozenset(
        bytearray(unsafe_bytes)
    )
    # One entry per possible byte: the character itself or its %XX escape.
    table = [chr(b) if b in keep else f"%{b:02X}" for b in range(256)]

    def quote(string: bytes) -> str:
        return "".join(map(table.__getitem__, string))

    return quote

588 

589 

# Quoters precompiled once at import time: the default configuration and
# the form-encoding variant (space kept safe here, "+" forced unsafe, so
# _fast_url_quote_plus can swap " " for "+").
_fast_url_quote = _make_fast_url_quote()
_fast_quote_plus = _make_fast_url_quote(safe=" ", unsafe="+")

592 

593 

def _fast_url_quote_plus(string: bytes) -> str:
    """Quote *string* for a query component, encoding spaces as ``+``."""
    return _fast_quote_plus(string).replace(" ", "+")

596 

597 

def url_quote(
    string: str | bytes,
    charset: str = "utf-8",
    errors: str = "strict",
    safe: str | bytes = "/:",
    unsafe: str | bytes = "",
) -> str:
    """URL encode a single string with a given encoding.

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.quote`` instead.

    .. versionadded:: 0.9.2
        The `unsafe` parameter was added.
    """
    # Normalize everything to bytes before byte-wise quoting.
    if not isinstance(string, (str, bytes, bytearray)):
        string = str(string)
    if isinstance(string, str):
        string = string.encode(charset, errors)
    if isinstance(safe, str):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, str):
        unsafe = unsafe.encode(charset, errors)

    keep = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))

    out = bytearray()
    for byte in bytearray(string):
        if byte in keep:
            out.append(byte)
        else:
            out.extend(_bytetohex[byte])

    return bytes(out).decode(charset)

635 

636 

def url_quote_plus(
    string: str, charset: str = "utf-8", errors: str = "strict", safe: str = ""
) -> str:
    """URL encode a single string with the given encoding and convert
    whitespace to "+".

    :param s: The string to quote.
    :param charset: The charset to be used.
    :param safe: An optional sequence of safe characters.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.quote_plus`` instead.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
        # Spaces are declared safe (so they survive quoting) and then
        # rewritten to "+"; "+" itself is forced to be escaped.
        quoted = url_quote(string, charset, errors, safe + " ", "+")
    return quoted.replace(" ", "+")

654 

655 

def url_unparse(components: tuple[str, str, str, str, str]) -> str:
    """The reverse operation to :meth:`url_parse`. This accepts arbitrary
    as well as :class:`URL` tuples and returns a URL as a string.

    :param components: the parsed URL as tuple which should be converted
                       into a URL string.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.urlunsplit`` instead.
    """

    _check_str_tuple(components)
    scheme, netloc, path, query, fragment = components
    # s() adapts literals to str or bytes to match the components' type.
    s = _make_encode_wrapper(scheme)
    url = s("")

    # We generally treat file:///x and file:/x the same which is also
    # what browsers seem to do. This also allows us to ignore a schema
    # register for netloc utilization or having to differentiate between
    # empty and missing netloc.
    if netloc or (scheme and path.startswith(s("/"))):
        if path and path[:1] != s("/"):
            path = s("/") + path
        url = s("//") + (netloc or s("")) + path
    elif path:
        url += path
    if scheme:
        url = scheme + s(":") + url
    if query:
        url = url + s("?") + query
    if fragment:
        url = url + s("#") + fragment
    return url

689 

690 

def url_unquote(
    s: str | bytes,
    charset: str = "utf-8",
    errors: str = "replace",
    unsafe: str = "",
) -> str:
    """URL decode a single string with a given encoding. If the charset
    is set to `None` no decoding is performed and raw bytes are
    returned.

    :param s: the string to unquote.
    :param charset: the charset of the query string. If set to `None`
                    no decoding will take place.
    :param errors: the error handling for the charset decoding.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.unquote`` instead.
    """
    raw = _unquote_to_bytes(s, unsafe)
    # charset=None means the caller wants the raw bytes back.
    return raw if charset is None else raw.decode(charset, errors)

713 

714 

def url_unquote_plus(
    s: str | bytes, charset: str = "utf-8", errors: str = "replace"
) -> str:
    """URL decode a single string with the given `charset` and decode "+" to
    whitespace.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``.

    :param s: The string to unquote.
    :param charset: the charset of the query string. If set to `None`
                    no decoding will take place.
    :param errors: The error handling for the `charset` decoding.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.unquote_plus`` instead.
    """
    # Pick str or bytes literals to match the input, then map "+" to space
    # before the actual unquoting.
    plus, space = ("+", " ") if isinstance(s, str) else (b"+", b" ")
    s = s.replace(plus, space)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
        return url_unquote(s, charset, errors)

740 

741 

def url_fix(s: str, charset: str = "utf-8") -> str:
    r"""Sometimes you get an URL by a user that just isn't a real URL because
    it contains unsafe characters like ' ' and so on. This function can fix
    some of the problems in a similar way browsers handle data entered by the
    user:

    >>> url_fix('http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given
                    as a string.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4.
    """
    # First step is to switch to text processing and to convert
    # backslashes (which are invalid in URLs anyways) to slashes. This is
    # consistent with what Chrome does.
    s = _to_str(s, charset, "replace").replace("\\", "/")

    # For the specific case that we look like a malformed windows URL
    # we want to fix this up manually:
    if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"):
        s = f"file:///{s[7:]}"

    with warnings.catch_warnings():
        # The helpers below are the deprecated werkzeug URL API; their
        # warnings are silenced since this function is itself the shim.
        warnings.filterwarnings("ignore", "'werkzeug", DeprecationWarning)
        url = url_parse(s)
        path = url_quote(url.path, charset, safe="/%+$!*'(),")
        qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),")
        anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),")
        return url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))

775 

776 

def url_decode(
    s: t.AnyStr,
    charset: str = "utf-8",
    include_empty: bool = True,
    errors: str = "replace",
    separator: str = "&",
    cls: type[ds.MultiDict] | None = None,
) -> ds.MultiDict[str, str]:
    """Parse a query string and return it as a :class:`MultiDict`.

    :param s: The query string to parse.
    :param charset: Decode bytes to string with this charset. If not
        given, bytes are returned as-is.
    :param include_empty: Include keys with empty values in the dict.
    :param errors: Error handling behavior when decoding bytes.
    :param separator: Separator character between pairs.
    :param cls: Container to hold result instead of :class:`MultiDict`.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.parse_qs`` instead.

    .. versionchanged:: 2.1
        The ``decode_keys`` parameter was removed.

    .. versionchanged:: 0.5
        In previous versions ";" and "&" could be used for url decoding.
        Now only "&" is supported. If you want to use ";", a different
        ``separator`` can be provided.

    .. versionchanged:: 0.5
        The ``cls`` parameter was added.
    """
    if cls is None:
        from werkzeug.datastructures import MultiDict  # noqa: F811

        cls = MultiDict

    # Align the separator's text type (str vs bytes) with the input.
    if isinstance(s, str) and not isinstance(separator, str):
        separator = separator.decode(charset or "ascii")
    elif isinstance(s, bytes) and not isinstance(separator, bytes):
        separator = separator.encode(charset or "ascii")  # type: ignore

    pairs = s.split(separator)  # type: ignore
    return cls(_url_decode_impl(pairs, charset, include_empty, errors))

822 

823 

def url_decode_stream(
    stream: t.IO[bytes],
    charset: str = "utf-8",
    include_empty: bool = True,
    errors: str = "replace",
    separator: bytes = b"&",
    cls: type[ds.MultiDict] | None = None,
    limit: int | None = None,
) -> ds.MultiDict[str, str]:
    """Works like :func:`url_decode` but decodes a stream. The behavior
    of stream and limit follows functions like
    :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
    directly fed to the `cls` so you can consume the data while it's
    parsed.

    :param stream: a stream with the encoded querystring
    :param charset: the charset of the query string. If set to `None`
                    no decoding will take place.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param limit: the content length of the URL data. Not necessary if
                  a limited stream is provided.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.parse_qs`` instead.

    .. versionchanged:: 2.1
        The ``decode_keys`` and ``return_iterator`` parameters were removed.

    .. versionadded:: 0.8
    """

    from werkzeug.wsgi import make_chunk_iter

    # Lazy pipeline: pairs are only pulled from the stream when the
    # container constructor below consumes the generator.
    pair_iter = make_chunk_iter(stream, separator, limit)
    decoder = _url_decode_impl(pair_iter, charset, include_empty, errors)

    if cls is None:
        from werkzeug.datastructures import MultiDict  # noqa: F811

        cls = MultiDict

    with warnings.catch_warnings():
        # make_chunk_iter is itself deprecated; silence its warning while
        # the pairs are consumed.
        warnings.filterwarnings("ignore", "'make_chunk_iter", DeprecationWarning)
        return cls(decoder)

873 

874 

def _url_decode_impl(
    pair_iter: t.Iterable[t.AnyStr], charset: str, include_empty: bool, errors: str
) -> t.Iterator[tuple[str, str]]:
    """Yield decoded ``(key, value)`` pairs from raw querystring chunks.

    Empty chunks are skipped; chunks without ``=`` become ``(chunk, "")``
    pairs unless *include_empty* is false.
    """
    for pair in pair_iter:
        if not pair:
            continue

        s = _make_encode_wrapper(pair)
        equal = s("=")

        if equal not in pair:
            if not include_empty:
                continue
            key = pair
            value = s("")
        else:
            key, value = pair.split(equal, 1)

        yield (
            url_unquote_plus(key, charset, errors),
            url_unquote_plus(value, charset, errors),
        )

894 

895 

def url_encode(
    obj: t.Mapping[str, str] | t.Iterable[tuple[str, str]],
    charset: str = "utf-8",
    sort: bool = False,
    key: t.Callable[[tuple[str, str]], t.Any] | None = None,
    separator: str = "&",
) -> str:
    """URL encode a dict/`MultiDict`. If a value is `None` it will not appear
    in the result string. Per default only values are encoded into the target
    charset strings.

    :param obj: the object to encode into a query string.
    :param charset: the charset of the query string.
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting. For more details
                check out the :func:`sorted` documentation.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.urlencode`` instead.

    .. versionchanged:: 2.1
        The ``encode_keys`` parameter was removed.

    .. versionchanged:: 0.5
        Added the ``sort``, ``key``, and ``separator`` parameters.
    """
    sep = _to_str(separator, "ascii")
    return sep.join(_url_encode_impl(obj, charset, sort, key))

925 

926 

def url_encode_stream(
    obj: t.Mapping[str, str] | t.Iterable[tuple[str, str]],
    stream: t.IO[str] | None = None,
    charset: str = "utf-8",
    sort: bool = False,
    key: t.Callable[[tuple[str, str]], t.Any] | None = None,
    separator: str = "&",
) -> None:
    """Like :meth:`url_encode` but writes the result to *stream*.

    If *stream* is `None`, a generator over the encoded pairs is returned
    instead and *separator* is ignored.

    :param obj: the mapping or iterable of pairs to encode.
    :param stream: target stream, or `None` to get an iterator of pairs.
    :param charset: the charset of the query string.
    :param sort: set to `True` to sort the pairs by `key`.
    :param separator: the string written between the encoded pairs.
    :param key: optional sort key function (see :func:`sorted`).

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.urlencode`` instead.

    .. versionchanged:: 2.1
        The ``encode_keys`` parameter was removed.

    .. versionadded:: 0.8
    """
    sep = _to_str(separator, "ascii")
    chunks = _url_encode_impl(obj, charset, sort, key)
    if stream is None:
        return chunks  # type: ignore
    # Write separator before every chunk except the first.
    first = True
    for chunk in chunks:
        if not first:
            stream.write(sep)
        stream.write(chunk)
        first = False
    return None

966 

967 

def url_join(
    base: str | tuple[str, str, str, str, str],
    url: str | tuple[str, str, str, str, str],
    allow_fragments: bool = True,
) -> str:
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter.

    :param base: the base URL for the join operation.
    :param url: the URL to join.
    :param allow_fragments: indicates whether fragments should be allowed.

    .. deprecated:: 2.3
        Will be removed in Werkzeug 2.4. Use ``urllib.parse.urljoin`` instead.
    """
    # Accept 5-tuples (scheme, netloc, path, query, fragment) as well as
    # plain strings; normalize both inputs to strings first.
    if isinstance(base, tuple):
        base = url_unparse(base)
    if isinstance(url, tuple):
        url = url_unparse(url)

    # Both must be str or both bytes; `s` coerces literals to that type.
    _check_str_tuple((base, url))
    s = _make_encode_wrapper(base)

    # Trivial cases: one side empty -> return the other unchanged.
    if not base:
        return url
    if not url:
        return base

    bscheme, bnetloc, bpath, bquery, _bfragment = url_parse(
        base, allow_fragments=allow_fragments
    )
    # Parse `url` with the base's scheme as default for scheme-relative URLs.
    scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)
    # Different scheme: `url` stands on its own.
    if scheme != bscheme:
        return url
    # Has its own netloc: only the scheme was inherited.
    if netloc:
        return url_unparse((scheme, netloc, path, query, fragment))
    netloc = bnetloc

    # Build the merged path segments:
    #  - absolute path: take it as-is;
    #  - empty path: keep the base path (and base query if none given);
    #  - relative path: replace the last base segment with the new path.
    if path[:1] == s("/"):
        segments = path.split(s("/"))
    elif not path:
        segments = bpath.split(s("/"))
        if not query:
            query = bquery
    else:
        segments = bpath.split(s("/"))[:-1] + path.split(s("/"))

    # If the rightmost part is "./" we want to keep the slash but
    # remove the dot.
    if segments[-1] == s("."):
        segments[-1] = s("")

    # Resolve ".." and "."
    segments = [segment for segment in segments if segment != s(".")]
    # Repeatedly collapse one "<segment>/.." pair per pass; the inner
    # loop's `else` (no break -> nothing collapsed) terminates the outer one.
    while True:
        i = 1
        n = len(segments) - 1
        while i < n:
            if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")):
                del segments[i - 1 : i + 1]
                break
            i += 1
        else:
            break

    # Remove trailing ".." if the URL is absolute
    # (a leading "" segment means the joined path starts with "/").
    unwanted_marker = [s(""), s("..")]
    while segments[:2] == unwanted_marker:
        del segments[1]

    path = s("/").join(segments)
    return url_unparse((scheme, netloc, path, query, fragment))

1040 

1041 

def patch_module():
    """Apply Odoo's monkeypatches to werkzeug.

    Mutates werkzeug classes and, on werkzeug >= 3.0 (where ``werkzeug.urls``
    lost its ``url_*`` helpers), re-injects the removed 2.3-era URL functions
    defined in this module.  Intended to be called once at startup; the
    ``hasattr`` guard below makes the URL part idempotent.
    """
    # Route JSON (de)serialization through Odoo's script-safe encoder.
    from odoo.tools.json import scriptsafe
    Request.json_module = Response.json_module = scriptsafe

    # Replace FileStorage.save with a plain stream copy using a 1 MiB buffer.
    # NOTE(review): unlike upstream, `dst` must already be a writable file
    # object — string/path destinations are not opened here.
    FileStorage.save = lambda self, dst, buffer_size=(1 << 20): copyfileobj(self.stream, dst, buffer_size)

    # Adapt MultiDict.deepcopy to the copy.deepcopy protocol: accept (and
    # ignore) the `memo` argument, delegating to the original implementation.
    def _multidict_deepcopy(self, memo=None):
        return orig_deepcopy(self)

    orig_deepcopy = MultiDict.deepcopy
    MultiDict.deepcopy = _multidict_deepcopy

    if Rule_get_func_code: 1054 ↛ 1061line 1054 didn't jump to line 1061 because the condition on line 1054 was always true
        # Wrap Rule._get_func_code (when present) to assert it only ever
        # receives a real code object before delegating to the original.
        @staticmethod
        def _get_func_code(code, name):
            assert isinstance(code, CodeType)
            return Rule_get_func_code(code, name)
        Rule._get_func_code = _get_func_code

    if hasattr(urls, 'url_join'):
        # URLs are already patched
        return
    # see https://github.com/pallets/werkzeug/compare/2.3.0..3.0.0
    # see https://github.com/pallets/werkzeug/blob/2.3.0/src/werkzeug/urls.py for replacement
    urls.url_decode = url_decode
    urls.url_encode = url_encode
    urls.url_join = url_join
    urls.url_parse = url_parse
    urls.url_quote = url_quote
    urls.url_unquote = url_unquote
    urls.url_quote_plus = url_quote_plus
    urls.url_unquote_plus = url_unquote_plus
    urls.url_unparse = url_unparse
    urls.URL = URL