[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
This commit is contained in:
pre-commit-ci[bot] 2024-04-13 00:00:18 +00:00
parent 72ad6dc953
commit f4cd1ba0d6
813 changed files with 66015 additions and 58839 deletions

View file

@@ -1,9 +1,10 @@
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
from __future__ import annotations
__all__ = ("loads", "load", "TOMLDecodeError")
__version__ = "2.0.1" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
__all__ = ('loads', 'load', 'TOMLDecodeError')
__version__ = '2.0.1' # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
from ._parser import TOMLDecodeError, load, loads

View file

@@ -1,52 +1,53 @@
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
from __future__ import annotations
from collections.abc import Iterable
import string
from collections.abc import Iterable
from types import MappingProxyType
from typing import Any, BinaryIO, NamedTuple
from typing import Any
from typing import BinaryIO
from typing import NamedTuple
from ._re import (
RE_DATETIME,
RE_LOCALTIME,
RE_NUMBER,
match_to_datetime,
match_to_localtime,
match_to_number,
)
from ._types import Key, ParseFloat, Pos
from ._re import match_to_datetime
from ._re import match_to_localtime
from ._re import match_to_number
from ._re import RE_DATETIME
from ._re import RE_LOCALTIME
from ._re import RE_NUMBER
from ._types import Key
from ._types import ParseFloat
from ._types import Pos
ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))
# Neither of these sets include quotation mark or backslash. They are
# currently handled as separate cases in the parser functions.
ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n")
ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset('\t')
ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset('\t\n')
ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS
ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS
TOML_WS = frozenset(" \t")
TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
TOML_WS = frozenset(' \t')
TOML_WS_AND_NEWLINE = TOML_WS | frozenset('\n')
BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + '-_')
KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
HEXDIGIT_CHARS = frozenset(string.hexdigits)
BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
{
"\\b": "\u0008", # backspace
"\\t": "\u0009", # tab
"\\n": "\u000A", # linefeed
"\\f": "\u000C", # form feed
"\\r": "\u000D", # carriage return
'\\"': "\u0022", # quote
"\\\\": "\u005C", # backslash
}
'\\b': '\u0008', # backspace
'\\t': '\u0009', # tab
'\\n': '\u000A', # linefeed
'\\f': '\u000C', # form feed
'\\r': '\u000D', # carriage return
'\\"': '\u0022', # quote
'\\\\': '\u005C', # backslash
},
)
@@ -61,7 +62,7 @@ def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]:
s = b.decode()
except AttributeError:
raise TypeError(
"File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
"File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`",
) from None
return loads(s, parse_float=parse_float)
@@ -71,7 +72,7 @@ def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]:  # no
# The spec allows converting "\r\n" to "\n", even in string
# literals. Let's do so to simplify parsing.
src = __s.replace("\r\n", "\n")
src = __s.replace('\r\n', '\n')
pos = 0
out = Output(NestedDict(), Flags())
header: Key = ()
@@ -95,25 +96,25 @@ def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]:  # no
char = src[pos]
except IndexError:
break
if char == "\n":
if char == '\n':
pos += 1
continue
if char in KEY_INITIAL_CHARS:
pos = key_value_rule(src, pos, out, header, parse_float)
pos = skip_chars(src, pos, TOML_WS)
elif char == "[":
elif char == '[':
try:
second_char: str | None = src[pos + 1]
except IndexError:
second_char = None
out.flags.finalize_pending()
if second_char == "[":
if second_char == '[':
pos, header = create_list_rule(src, pos, out)
else:
pos, header = create_dict_rule(src, pos, out)
pos = skip_chars(src, pos, TOML_WS)
elif char != "#":
raise suffixed_err(src, pos, "Invalid statement")
elif char != '#':
raise suffixed_err(src, pos, 'Invalid statement')
# 3. Skip comment
pos = skip_comment(src, pos)
@@ -123,9 +124,9 @@ def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]:  # no
char = src[pos]
except IndexError:
break
if char != "\n":
if char != '\n':
raise suffixed_err(
src, pos, "Expected newline or end of document after a statement"
src, pos, 'Expected newline or end of document after a statement',
)
pos += 1
@@ -158,7 +159,7 @@ class Flags:
for k in key[:-1]:
if k not in cont:
return
cont = cont[k]["nested"]
cont = cont[k]['nested']
cont.pop(key[-1], None)
def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003
@@ -166,11 +167,11 @@ class Flags:
key_parent, key_stem = key[:-1], key[-1]
for k in key_parent:
if k not in cont:
cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont = cont[k]["nested"]
cont[k] = {'flags': set(), 'recursive_flags': set(), 'nested': {}}
cont = cont[k]['nested']
if key_stem not in cont:
cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)
cont[key_stem] = {'flags': set(), 'recursive_flags': set(), 'nested': {}}
cont[key_stem]['recursive_flags' if recursive else 'flags'].add(flag)
def is_(self, key: Key, flag: int) -> bool:
if not key:
@@ -180,13 +181,13 @@ class Flags:
if k not in cont:
return False
inner_cont = cont[k]
if flag in inner_cont["recursive_flags"]:
if flag in inner_cont['recursive_flags']:
return True
cont = inner_cont["nested"]
cont = inner_cont['nested']
key_stem = key[-1]
if key_stem in cont:
cont = cont[key_stem]
return flag in cont["flags"] or flag in cont["recursive_flags"]
return flag in cont['flags'] or flag in cont['recursive_flags']
return False
@@ -209,7 +210,7 @@ class NestedDict:
if access_lists and isinstance(cont, list):
cont = cont[-1]
if not isinstance(cont, dict):
raise KeyError("There is no nest behind this key")
raise KeyError('There is no nest behind this key')
return cont
def append_nest_to_list(self, key: Key) -> None:
@@ -218,7 +219,7 @@ class NestedDict:
if last_key in cont:
list_ = cont[last_key]
if not isinstance(list_, list):
raise KeyError("An object other than list found behind this key")
raise KeyError('An object other than list found behind this key')
list_.append({})
else:
cont[last_key] = [{}]
@@ -251,12 +252,12 @@ def skip_until(
except ValueError:
new_pos = len(src)
if error_on_eof:
raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None
raise suffixed_err(src, new_pos, f'Expected {expect!r}') from None
if not error_on.isdisjoint(src[pos:new_pos]):
while src[pos] not in error_on:
pos += 1
raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}")
raise suffixed_err(src, pos, f'Found invalid character {src[pos]!r}')
return new_pos
@@ -265,9 +266,9 @@ def skip_comment(src: str, pos: Pos) -> Pos:
char: str | None = src[pos]
except IndexError:
char = None
if char == "#":
if char == '#':
return skip_until(
src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
src, pos + 1, '\n', error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False,
)
return pos
@@ -287,14 +288,14 @@ def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
pos, key = parse_key(src, pos)
if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
raise suffixed_err(src, pos, f"Cannot declare {key} twice")
raise suffixed_err(src, pos, f'Cannot declare {key} twice')
out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
try:
out.data.get_or_create_nest(key)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
raise suffixed_err(src, pos, 'Cannot overwrite a value') from None
if not src.startswith("]", pos):
if not src.startswith(']', pos):
raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration")
return pos + 1, key
@@ -305,7 +306,7 @@ def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
pos, key = parse_key(src, pos)
if out.flags.is_(key, Flags.FROZEN):
raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
raise suffixed_err(src, pos, f'Cannot mutate immutable namespace {key}')
# Free the namespace now that it points to another empty list item...
out.flags.unset_all(key)
# ...but this key precisely is still prohibited from table declaration
@@ -313,15 +314,15 @@ def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
try:
out.data.append_nest_to_list(key)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
raise suffixed_err(src, pos, 'Cannot overwrite a value') from None
if not src.startswith("]]", pos):
if not src.startswith(']]', pos):
raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration")
return pos + 2, key
def key_value_rule(
src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat
src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat,
) -> Pos:
pos, key, value = parse_key_value_pair(src, pos, parse_float)
key_parent, key_stem = key[:-1], key[-1]
@@ -331,22 +332,22 @@ def key_value_rule(
for cont_key in relative_path_cont_keys:
# Check that dotted key syntax does not redefine an existing table
if out.flags.is_(cont_key, Flags.EXPLICIT_NEST):
raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}")
raise suffixed_err(src, pos, f'Cannot redefine namespace {cont_key}')
# Containers in the relative path can't be opened with the table syntax or
# dotted key/value syntax in following table sections.
out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST)
if out.flags.is_(abs_key_parent, Flags.FROZEN):
raise suffixed_err(
src, pos, f"Cannot mutate immutable namespace {abs_key_parent}"
src, pos, f'Cannot mutate immutable namespace {abs_key_parent}',
)
try:
nest = out.data.get_or_create_nest(abs_key_parent)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
raise suffixed_err(src, pos, 'Cannot overwrite a value') from None
if key_stem in nest:
raise suffixed_err(src, pos, "Cannot overwrite a value")
raise suffixed_err(src, pos, 'Cannot overwrite a value')
# Mark inline table and array namespaces recursively immutable
if isinstance(value, (dict, list)):
out.flags.set(header + key, Flags.FROZEN, recursive=True)
@@ -355,14 +356,14 @@ def key_value_rule(
def parse_key_value_pair(
src: str, pos: Pos, parse_float: ParseFloat
src: str, pos: Pos, parse_float: ParseFloat,
) -> tuple[Pos, Key, Any]:
pos, key = parse_key(src, pos)
try:
char: str | None = src[pos]
except IndexError:
char = None
if char != "=":
if char != '=':
raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair")
pos += 1
pos = skip_chars(src, pos, TOML_WS)
@@ -379,7 +380,7 @@ def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]:
char: str | None = src[pos]
except IndexError:
char = None
if char != ".":
if char != '.':
return pos, key
pos += 1
pos = skip_chars(src, pos, TOML_WS)
@@ -401,7 +402,7 @@ def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]:
return parse_literal_str(src, pos)
if char == '"':
return parse_one_line_basic_str(src, pos)
raise suffixed_err(src, pos, "Invalid initial character for a key part")
raise suffixed_err(src, pos, 'Invalid initial character for a key part')
def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]:
@@ -414,22 +415,22 @@ def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]
array: list = []
pos = skip_comments_and_array_ws(src, pos)
if src.startswith("]", pos):
if src.startswith(']', pos):
return pos + 1, array
while True:
pos, val = parse_value(src, pos, parse_float)
array.append(val)
pos = skip_comments_and_array_ws(src, pos)
c = src[pos : pos + 1]
if c == "]":
c = src[pos: pos + 1]
if c == ']':
return pos + 1, array
if c != ",":
raise suffixed_err(src, pos, "Unclosed array")
if c != ',':
raise suffixed_err(src, pos, 'Unclosed array')
pos += 1
pos = skip_comments_and_array_ws(src, pos)
if src.startswith("]", pos):
if src.startswith(']', pos):
return pos + 1, array
@@ -439,26 +440,26 @@ def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos
flags = Flags()
pos = skip_chars(src, pos, TOML_WS)
if src.startswith("}", pos):
if src.startswith('}', pos):
return pos + 1, nested_dict.dict
while True:
pos, key, value = parse_key_value_pair(src, pos, parse_float)
key_parent, key_stem = key[:-1], key[-1]
if flags.is_(key, Flags.FROZEN):
raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
raise suffixed_err(src, pos, f'Cannot mutate immutable namespace {key}')
try:
nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
except KeyError:
raise suffixed_err(src, pos, "Cannot overwrite a value") from None
raise suffixed_err(src, pos, 'Cannot overwrite a value') from None
if key_stem in nest:
raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}")
raise suffixed_err(src, pos, f'Duplicate inline table key {key_stem!r}')
nest[key_stem] = value
pos = skip_chars(src, pos, TOML_WS)
c = src[pos : pos + 1]
if c == "}":
c = src[pos: pos + 1]
if c == '}':
return pos + 1, nested_dict.dict
if c != ",":
raise suffixed_err(src, pos, "Unclosed inline table")
if c != ',':
raise suffixed_err(src, pos, 'Unclosed inline table')
if isinstance(value, (dict, list)):
flags.set(key, Flags.FROZEN, recursive=True)
pos += 1
@@ -466,27 +467,27 @@ def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos
def parse_basic_str_escape(
src: str, pos: Pos, *, multiline: bool = False
src: str, pos: Pos, *, multiline: bool = False,
) -> tuple[Pos, str]:
escape_id = src[pos : pos + 2]
escape_id = src[pos: pos + 2]
pos += 2
if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
if multiline and escape_id in {'\\ ', '\\\t', '\\\n'}:
# Skip whitespace until next non-whitespace character or end of
# the doc. Error if non-whitespace is found before newline.
if escape_id != "\\\n":
if escape_id != '\\\n':
pos = skip_chars(src, pos, TOML_WS)
try:
char = src[pos]
except IndexError:
return pos, ""
if char != "\n":
return pos, ''
if char != '\n':
raise suffixed_err(src, pos, "Unescaped '\\' in a string")
pos += 1
pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
return pos, ""
if escape_id == "\\u":
return pos, ''
if escape_id == '\\u':
return parse_hex_char(src, pos, 4)
if escape_id == "\\U":
if escape_id == '\\U':
return parse_hex_char(src, pos, 8)
try:
return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
@@ -499,13 +500,13 @@ def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]:
def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]:
hex_str = src[pos : pos + hex_len]
hex_str = src[pos: pos + hex_len]
if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
raise suffixed_err(src, pos, "Invalid hex value")
raise suffixed_err(src, pos, 'Invalid hex value')
pos += hex_len
hex_int = int(hex_str, 16)
if not is_unicode_scalar_value(hex_int):
raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
raise suffixed_err(src, pos, 'Escaped character is not a Unicode scalar value')
return pos, chr(hex_int)
@@ -513,14 +514,14 @@ def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]:
pos += 1 # Skip starting apostrophe
start_pos = pos
pos = skip_until(
src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True,
)
return pos + 1, src[start_pos:pos] # Skip ending apostrophe
def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]:
pos += 3
if src.startswith("\n", pos):
if src.startswith('\n', pos):
pos += 1
if literal:
@@ -556,13 +557,13 @@ def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
else:
error_on = ILLEGAL_BASIC_STR_CHARS
parse_escapes = parse_basic_str_escape
result = ""
result = ''
start_pos = pos
while True:
try:
char = src[pos]
except IndexError:
raise suffixed_err(src, pos, "Unterminated string") from None
raise suffixed_err(src, pos, 'Unterminated string') from None
if char == '"':
if not multiline:
return pos + 1, result + src[start_pos:pos]
@@ -570,19 +571,19 @@ def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
return pos + 3, result + src[start_pos:pos]
pos += 1
continue
if char == "\\":
if char == '\\':
result += src[start_pos:pos]
pos, parsed_escape = parse_escapes(src, pos)
result += parsed_escape
start_pos = pos
continue
if char in error_on:
raise suffixed_err(src, pos, f"Illegal character {char!r}")
raise suffixed_err(src, pos, f'Illegal character {char!r}')
pos += 1
def parse_value( # noqa: C901
src: str, pos: Pos, parse_float: ParseFloat
src: str, pos: Pos, parse_float: ParseFloat,
) -> tuple[Pos, Any]:
try:
char: str | None = src[pos]
@@ -604,19 +605,19 @@ def parse_value(  # noqa: C901
return parse_literal_str(src, pos)
# Booleans
if char == "t":
if src.startswith("true", pos):
if char == 't':
if src.startswith('true', pos):
return pos + 4, True
if char == "f":
if src.startswith("false", pos):
if char == 'f':
if src.startswith('false', pos):
return pos + 5, False
# Arrays
if char == "[":
if char == '[':
return parse_array(src, pos, parse_float)
# Inline tables
if char == "{":
if char == '{':
return parse_inline_table(src, pos, parse_float)
# Dates and times
@@ -625,7 +626,7 @@ def parse_value(  # noqa: C901
try:
datetime_obj = match_to_datetime(datetime_match)
except ValueError as e:
raise suffixed_err(src, pos, "Invalid date or datetime") from e
raise suffixed_err(src, pos, 'Invalid date or datetime') from e
return datetime_match.end(), datetime_obj
localtime_match = RE_LOCALTIME.match(src, pos)
if localtime_match:
@@ -639,14 +640,14 @@ def parse_value(  # noqa: C901
return number_match.end(), match_to_number(number_match, parse_float)
# Special floats
first_three = src[pos : pos + 3]
if first_three in {"inf", "nan"}:
first_three = src[pos: pos + 3]
if first_three in {'inf', 'nan'}:
return pos + 3, parse_float(first_three)
first_four = src[pos : pos + 4]
if first_four in {"-inf", "+inf", "-nan", "+nan"}:
first_four = src[pos: pos + 4]
if first_four in {'-inf', '+inf', '-nan', '+nan'}:
return pos + 4, parse_float(first_four)
raise suffixed_err(src, pos, "Invalid value")
raise suffixed_err(src, pos, 'Invalid value')
def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
@@ -655,15 +656,15 @@ def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
def coord_repr(src: str, pos: Pos) -> str:
if pos >= len(src):
return "end of document"
line = src.count("\n", 0, pos) + 1
return 'end of document'
line = src.count('\n', 0, pos) + 1
if line == 1:
column = pos + 1
else:
column = pos - src.rindex("\n", 0, pos)
return f"line {line}, column {column}"
column = pos - src.rindex('\n', 0, pos)
return f'line {line}, column {column}'
return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
return TOMLDecodeError(f'{msg} (at {coord_repr(src, pos)})')
def is_unicode_scalar_value(codepoint: int) -> bool:
@@ -685,7 +686,7 @@ def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
def safe_parse_float(float_str: str) -> Any:
float_value = parse_float(float_str)
if isinstance(float_value, (dict, list)):
raise ValueError("parse_float must not return dicts or lists")
raise ValueError('parse_float must not return dicts or lists')
return float_value
return safe_parse_float

View file

@@ -1,12 +1,16 @@
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
from __future__ import annotations
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from functools import lru_cache
import re
from datetime import date
from datetime import datetime
from datetime import time
from datetime import timedelta
from datetime import timezone
from datetime import tzinfo
from functools import lru_cache
from typing import Any
from ._types import ParseFloat
@@ -14,7 +18,7 @@ from ._types import ParseFloat
# E.g.
# - 00:32:00.999999
# - 00:32:00
_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
_TIME_RE_STR = r'([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?'
RE_NUMBER = re.compile(
r"""
@@ -72,10 +76,10 @@ def match_to_datetime(match: re.Match) -> datetime | date:
if hour_str is None:
return date(year, month, day)
hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
micros = int(micros_str.ljust(6, "0")) if micros_str else 0
micros = int(micros_str.ljust(6, '0')) if micros_str else 0
if offset_sign_str:
tz: tzinfo | None = cached_tz(
offset_hour_str, offset_minute_str, offset_sign_str
offset_hour_str, offset_minute_str, offset_sign_str,
)
elif zulu_time:
tz = timezone.utc
@@ -86,22 +90,22 @@ def match_to_datetime(match: re.Match) -> datetime | date:
@lru_cache(maxsize=None)
def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
sign = 1 if sign_str == "+" else -1
sign = 1 if sign_str == '+' else -1
return timezone(
timedelta(
hours=sign * int(hour_str),
minutes=sign * int(minute_str),
)
),
)
def match_to_localtime(match: re.Match) -> time:
hour_str, minute_str, sec_str, micros_str = match.groups()
micros = int(micros_str.ljust(6, "0")) if micros_str else 0
micros = int(micros_str.ljust(6, '0')) if micros_str else 0
return time(int(hour_str), int(minute_str), int(sec_str), micros)
def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
if match.group("floatpart"):
if match.group('floatpart'):
return parse_float(match.group())
return int(match.group(), 0)

View file

@@ -1,8 +1,11 @@
# SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
# Licensed to PSF under a Contributor Agreement.
from __future__ import annotations
from typing import Any, Callable, Tuple
from typing import Any
from typing import Callable
from typing import Tuple
# Type annotations
ParseFloat = Callable[[str], Any]