aboutsummaryrefslogtreecommitdiff
path: root/venv/lib/python3.8/site-packages/werkzeug
diff options
context:
space:
mode:
authorsotech117 <michael_foiani@brown.edu>2025-07-31 17:27:24 -0400
committersotech117 <michael_foiani@brown.edu>2025-07-31 17:27:24 -0400
commit5bf22fc7e3c392c8bd44315ca2d06d7dca7d084e (patch)
tree8dacb0f195df1c0788d36dd0064f6bbaa3143ede /venv/lib/python3.8/site-packages/werkzeug
parentb832d364da8c2efe09e3f75828caf73c50d01ce3 (diff)
add code for analysis of data
Diffstat (limited to 'venv/lib/python3.8/site-packages/werkzeug')
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/__init__.py25
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/_internal.py211
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/_reloader.py471
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/__init__.py34
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/accept.py326
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/accept.pyi54
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/auth.py316
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/cache_control.py175
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/cache_control.pyi115
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/csp.py94
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/csp.pyi169
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/etag.py95
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/etag.pyi30
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/file_storage.py196
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/file_storage.pyi49
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/headers.py515
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/headers.pyi109
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/mixins.py242
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/mixins.pyi97
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/range.py180
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/range.pyi57
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/structures.py1010
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/datastructures/structures.pyi206
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/debug/__init__.py565
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/debug/console.py219
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/debug/repr.py282
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/debug/shared/ICON_LICENSE.md6
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/debug/shared/console.pngbin0 -> 507 bytes
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/debug/shared/debugger.js344
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/debug/shared/less.pngbin0 -> 191 bytes
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/debug/shared/more.pngbin0 -> 200 bytes
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/debug/shared/style.css150
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/debug/tbtools.py450
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/exceptions.py881
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/formparser.py430
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/http.py1391
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/local.py653
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/middleware/__init__.py0
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/middleware/dispatcher.py81
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/middleware/http_proxy.py236
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/middleware/lint.py439
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/middleware/profiler.py155
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/middleware/proxy_fix.py183
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/middleware/shared_data.py283
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/py.typed0
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/routing/__init__.py134
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/routing/converters.py261
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/routing/exceptions.py152
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/routing/map.py951
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/routing/matcher.py202
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/routing/rules.py928
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/sansio/__init__.py0
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/sansio/http.py171
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/sansio/multipart.py323
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/sansio/request.py536
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/sansio/response.py754
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/sansio/utils.py159
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/security.py163
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/serving.py1125
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/test.py1464
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/testapp.py194
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/urls.py203
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/user_agent.py47
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/utils.py691
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/wrappers/__init__.py3
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/wrappers/request.py647
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/wrappers/response.py831
-rw-r--r--venv/lib/python3.8/site-packages/werkzeug/wsgi.py595
68 files changed, 22058 insertions, 0 deletions
diff --git a/venv/lib/python3.8/site-packages/werkzeug/__init__.py b/venv/lib/python3.8/site-packages/werkzeug/__init__.py
new file mode 100644
index 0000000..57cb753
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/__init__.py
@@ -0,0 +1,25 @@
+from __future__ import annotations
+
+import typing as t
+
+from .serving import run_simple as run_simple
+from .test import Client as Client
+from .wrappers import Request as Request
+from .wrappers import Response as Response
+
+
+def __getattr__(name: str) -> t.Any:
+ if name == "__version__":
+ import importlib.metadata
+ import warnings
+
+ warnings.warn(
+ "The '__version__' attribute is deprecated and will be removed in"
+ " Werkzeug 3.1. Use feature detection or"
+ " 'importlib.metadata.version(\"werkzeug\")' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return importlib.metadata.version("werkzeug")
+
+ raise AttributeError(name)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/_internal.py b/venv/lib/python3.8/site-packages/werkzeug/_internal.py
new file mode 100644
index 0000000..7dd2fbc
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/_internal.py
@@ -0,0 +1,211 @@
+from __future__ import annotations
+
+import logging
+import re
+import sys
+import typing as t
+from datetime import datetime
+from datetime import timezone
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import WSGIEnvironment
+
+ from .wrappers.request import Request
+
+_logger: logging.Logger | None = None
+
+
+class _Missing:
+ def __repr__(self) -> str:
+ return "no value"
+
+ def __reduce__(self) -> str:
+ return "_missing"
+
+
+_missing = _Missing()
+
+
+def _wsgi_decoding_dance(s: str) -> str:
+ return s.encode("latin1").decode(errors="replace")
+
+
+def _wsgi_encoding_dance(s: str) -> str:
+ return s.encode().decode("latin1")
+
+
+def _get_environ(obj: WSGIEnvironment | Request) -> WSGIEnvironment:
+ env = getattr(obj, "environ", obj)
+ assert isinstance(
+ env, dict
+ ), f"{type(obj).__name__!r} is not a WSGI environment (has to be a dict)"
+ return env
+
+
+def _has_level_handler(logger: logging.Logger) -> bool:
+ """Check if there is a handler in the logging chain that will handle
+ the given logger's effective level.
+ """
+ level = logger.getEffectiveLevel()
+ current = logger
+
+ while current:
+ if any(handler.level <= level for handler in current.handlers):
+ return True
+
+ if not current.propagate:
+ break
+
+ current = current.parent # type: ignore
+
+ return False
+
+
+class _ColorStreamHandler(logging.StreamHandler): # type: ignore[type-arg]
+ """On Windows, wrap stream with Colorama for ANSI style support."""
+
+ def __init__(self) -> None:
+ try:
+ import colorama
+ except ImportError:
+ stream = None
+ else:
+ stream = colorama.AnsiToWin32(sys.stderr)
+
+ super().__init__(stream)
+
+
+def _log(type: str, message: str, *args: t.Any, **kwargs: t.Any) -> None:
+ """Log a message to the 'werkzeug' logger.
+
+ The logger is created the first time it is needed. If there is no
+ level set, it is set to :data:`logging.INFO`. If there is no handler
+ for the logger's effective level, a :class:`logging.StreamHandler`
+ is added.
+ """
+ global _logger
+
+ if _logger is None:
+ _logger = logging.getLogger("werkzeug")
+
+ if _logger.level == logging.NOTSET:
+ _logger.setLevel(logging.INFO)
+
+ if not _has_level_handler(_logger):
+ _logger.addHandler(_ColorStreamHandler())
+
+ getattr(_logger, type)(message.rstrip(), *args, **kwargs)
+
+
+@t.overload
+def _dt_as_utc(dt: None) -> None: ...
+
+
+@t.overload
+def _dt_as_utc(dt: datetime) -> datetime: ...
+
+
+def _dt_as_utc(dt: datetime | None) -> datetime | None:
+ if dt is None:
+ return dt
+
+ if dt.tzinfo is None:
+ return dt.replace(tzinfo=timezone.utc)
+ elif dt.tzinfo != timezone.utc:
+ return dt.astimezone(timezone.utc)
+
+ return dt
+
+
+_TAccessorValue = t.TypeVar("_TAccessorValue")
+
+
+class _DictAccessorProperty(t.Generic[_TAccessorValue]):
+ """Baseclass for `environ_property` and `header_property`."""
+
+ read_only = False
+
+ def __init__(
+ self,
+ name: str,
+ default: _TAccessorValue | None = None,
+ load_func: t.Callable[[str], _TAccessorValue] | None = None,
+ dump_func: t.Callable[[_TAccessorValue], str] | None = None,
+ read_only: bool | None = None,
+ doc: str | None = None,
+ ) -> None:
+ self.name = name
+ self.default = default
+ self.load_func = load_func
+ self.dump_func = dump_func
+ if read_only is not None:
+ self.read_only = read_only
+ self.__doc__ = doc
+
+ def lookup(self, instance: t.Any) -> t.MutableMapping[str, t.Any]:
+ raise NotImplementedError
+
+ @t.overload
+ def __get__(
+ self, instance: None, owner: type
+ ) -> _DictAccessorProperty[_TAccessorValue]: ...
+
+ @t.overload
+ def __get__(self, instance: t.Any, owner: type) -> _TAccessorValue: ...
+
+ def __get__(
+ self, instance: t.Any | None, owner: type
+ ) -> _TAccessorValue | _DictAccessorProperty[_TAccessorValue]:
+ if instance is None:
+ return self
+
+ storage = self.lookup(instance)
+
+ if self.name not in storage:
+ return self.default # type: ignore
+
+ value = storage[self.name]
+
+ if self.load_func is not None:
+ try:
+ return self.load_func(value)
+ except (ValueError, TypeError):
+ return self.default # type: ignore
+
+ return value # type: ignore
+
+ def __set__(self, instance: t.Any, value: _TAccessorValue) -> None:
+ if self.read_only:
+ raise AttributeError("read only property")
+
+ if self.dump_func is not None:
+ self.lookup(instance)[self.name] = self.dump_func(value)
+ else:
+ self.lookup(instance)[self.name] = value
+
+ def __delete__(self, instance: t.Any) -> None:
+ if self.read_only:
+ raise AttributeError("read only property")
+
+ self.lookup(instance).pop(self.name, None)
+
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {self.name}>"
+
+
+_plain_int_re = re.compile(r"-?\d+", re.ASCII)
+
+
+def _plain_int(value: str) -> int:
+ """Parse an int only if it is only ASCII digits and ``-``.
+
+ This disallows ``+``, ``_``, and non-ASCII digits, which are accepted by ``int`` but
+ are not allowed in HTTP header values.
+
+ Any leading or trailing whitespace is stripped
+ """
+ value = value.strip()
+ if _plain_int_re.fullmatch(value) is None:
+ raise ValueError
+
+ return int(value)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/_reloader.py b/venv/lib/python3.8/site-packages/werkzeug/_reloader.py
new file mode 100644
index 0000000..8fd50b9
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/_reloader.py
@@ -0,0 +1,471 @@
+from __future__ import annotations
+
+import fnmatch
+import os
+import subprocess
+import sys
+import threading
+import time
+import typing as t
+from itertools import chain
+from pathlib import PurePath
+
+from ._internal import _log
+
+# The various system prefixes where imports are found. Base values are
+# different when running in a virtualenv. All reloaders will ignore the
+# base paths (usually the system installation). The stat reloader won't
+# scan the virtualenv paths, it will only include modules that are
+# already imported.
+_ignore_always = tuple({sys.base_prefix, sys.base_exec_prefix})
+prefix = {*_ignore_always, sys.prefix, sys.exec_prefix}
+
+if hasattr(sys, "real_prefix"):
+ # virtualenv < 20
+ prefix.add(sys.real_prefix)
+
+_stat_ignore_scan = tuple(prefix)
+del prefix
+_ignore_common_dirs = {
+ "__pycache__",
+ ".git",
+ ".hg",
+ ".tox",
+ ".nox",
+ ".pytest_cache",
+ ".mypy_cache",
+}
+
+
+def _iter_module_paths() -> t.Iterator[str]:
+ """Find the filesystem paths associated with imported modules."""
+ # List is in case the value is modified by the app while updating.
+ for module in list(sys.modules.values()):
+ name = getattr(module, "__file__", None)
+
+ if name is None or name.startswith(_ignore_always):
+ continue
+
+ while not os.path.isfile(name):
+ # Zip file, find the base file without the module path.
+ old = name
+ name = os.path.dirname(name)
+
+ if name == old: # skip if it was all directories somehow
+ break
+ else:
+ yield name
+
+
+def _remove_by_pattern(paths: set[str], exclude_patterns: set[str]) -> None:
+ for pattern in exclude_patterns:
+ paths.difference_update(fnmatch.filter(paths, pattern))
+
+
+def _find_stat_paths(
+ extra_files: set[str], exclude_patterns: set[str]
+) -> t.Iterable[str]:
+ """Find paths for the stat reloader to watch. Returns imported
+ module files, Python files under non-system paths. Extra files and
+ Python files under extra directories can also be scanned.
+
+ System paths have to be excluded for efficiency. Non-system paths,
+ such as a project root or ``sys.path.insert``, should be the paths
+ of interest to the user anyway.
+ """
+ paths = set()
+
+ for path in chain(list(sys.path), extra_files):
+ path = os.path.abspath(path)
+
+ if os.path.isfile(path):
+ # zip file on sys.path, or extra file
+ paths.add(path)
+ continue
+
+ parent_has_py = {os.path.dirname(path): True}
+
+ for root, dirs, files in os.walk(path):
+ # Optimizations: ignore system prefixes, __pycache__ will
+ # have a py or pyc module at the import path, ignore some
+ # common known dirs such as version control and tool caches.
+ if (
+ root.startswith(_stat_ignore_scan)
+ or os.path.basename(root) in _ignore_common_dirs
+ ):
+ dirs.clear()
+ continue
+
+ has_py = False
+
+ for name in files:
+ if name.endswith((".py", ".pyc")):
+ has_py = True
+ paths.add(os.path.join(root, name))
+
+ # Optimization: stop scanning a directory if neither it nor
+ # its parent contained Python files.
+ if not (has_py or parent_has_py[os.path.dirname(root)]):
+ dirs.clear()
+ continue
+
+ parent_has_py[root] = has_py
+
+ paths.update(_iter_module_paths())
+ _remove_by_pattern(paths, exclude_patterns)
+ return paths
+
+
+def _find_watchdog_paths(
+ extra_files: set[str], exclude_patterns: set[str]
+) -> t.Iterable[str]:
+ """Find paths for the stat reloader to watch. Looks at the same
+ sources as the stat reloader, but watches everything under
+ directories instead of individual files.
+ """
+ dirs = set()
+
+ for name in chain(list(sys.path), extra_files):
+ name = os.path.abspath(name)
+
+ if os.path.isfile(name):
+ name = os.path.dirname(name)
+
+ dirs.add(name)
+
+ for name in _iter_module_paths():
+ dirs.add(os.path.dirname(name))
+
+ _remove_by_pattern(dirs, exclude_patterns)
+ return _find_common_roots(dirs)
+
+
+def _find_common_roots(paths: t.Iterable[str]) -> t.Iterable[str]:
+ root: dict[str, dict[str, t.Any]] = {}
+
+ for chunks in sorted((PurePath(x).parts for x in paths), key=len, reverse=True):
+ node = root
+
+ for chunk in chunks:
+ node = node.setdefault(chunk, {})
+
+ node.clear()
+
+ rv = set()
+
+ def _walk(node: t.Mapping[str, dict[str, t.Any]], path: tuple[str, ...]) -> None:
+ for prefix, child in node.items():
+ _walk(child, path + (prefix,))
+
+ # If there are no more nodes, and a path has been accumulated, add it.
+ # Path may be empty if the "" entry is in sys.path.
+ if not node and path:
+ rv.add(os.path.join(*path))
+
+ _walk(root, ())
+ return rv
+
+
+def _get_args_for_reloading() -> list[str]:
+ """Determine how the script was executed, and return the args needed
+ to execute it again in a new process.
+ """
+ if sys.version_info >= (3, 10):
+ # sys.orig_argv, added in Python 3.10, contains the exact args used to invoke
+ # Python. Still replace argv[0] with sys.executable for accuracy.
+ return [sys.executable, *sys.orig_argv[1:]]
+
+ rv = [sys.executable]
+ py_script = sys.argv[0]
+ args = sys.argv[1:]
+ # Need to look at main module to determine how it was executed.
+ __main__ = sys.modules["__main__"]
+
+ # The value of __package__ indicates how Python was called. It may
+ # not exist if a setuptools script is installed as an egg. It may be
+ # set incorrectly for entry points created with pip on Windows.
+ if getattr(__main__, "__package__", None) is None or (
+ os.name == "nt"
+ and __main__.__package__ == ""
+ and not os.path.exists(py_script)
+ and os.path.exists(f"{py_script}.exe")
+ ):
+ # Executed a file, like "python app.py".
+ py_script = os.path.abspath(py_script)
+
+ if os.name == "nt":
+ # Windows entry points have ".exe" extension and should be
+ # called directly.
+ if not os.path.exists(py_script) and os.path.exists(f"{py_script}.exe"):
+ py_script += ".exe"
+
+ if (
+ os.path.splitext(sys.executable)[1] == ".exe"
+ and os.path.splitext(py_script)[1] == ".exe"
+ ):
+ rv.pop(0)
+
+ rv.append(py_script)
+ else:
+ # Executed a module, like "python -m werkzeug.serving".
+ if os.path.isfile(py_script):
+ # Rewritten by Python from "-m script" to "/path/to/script.py".
+ py_module = t.cast(str, __main__.__package__)
+ name = os.path.splitext(os.path.basename(py_script))[0]
+
+ if name != "__main__":
+ py_module += f".{name}"
+ else:
+ # Incorrectly rewritten by pydevd debugger from "-m script" to "script".
+ py_module = py_script
+
+ rv.extend(("-m", py_module.lstrip(".")))
+
+ rv.extend(args)
+ return rv
+
+
+class ReloaderLoop:
+ name = ""
+
+ def __init__(
+ self,
+ extra_files: t.Iterable[str] | None = None,
+ exclude_patterns: t.Iterable[str] | None = None,
+ interval: int | float = 1,
+ ) -> None:
+ self.extra_files: set[str] = {os.path.abspath(x) for x in extra_files or ()}
+ self.exclude_patterns: set[str] = set(exclude_patterns or ())
+ self.interval = interval
+
+ def __enter__(self) -> ReloaderLoop:
+ """Do any setup, then run one step of the watch to populate the
+ initial filesystem state.
+ """
+ self.run_step()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore
+ """Clean up any resources associated with the reloader."""
+ pass
+
+ def run(self) -> None:
+ """Continually run the watch step, sleeping for the configured
+ interval after each step.
+ """
+ while True:
+ self.run_step()
+ time.sleep(self.interval)
+
+ def run_step(self) -> None:
+ """Run one step for watching the filesystem. Called once to set
+ up initial state, then repeatedly to update it.
+ """
+ pass
+
+ def restart_with_reloader(self) -> int:
+ """Spawn a new Python interpreter with the same arguments as the
+ current one, but running the reloader thread.
+ """
+ while True:
+ _log("info", f" * Restarting with {self.name}")
+ args = _get_args_for_reloading()
+ new_environ = os.environ.copy()
+ new_environ["WERKZEUG_RUN_MAIN"] = "true"
+ exit_code = subprocess.call(args, env=new_environ, close_fds=False)
+
+ if exit_code != 3:
+ return exit_code
+
+ def trigger_reload(self, filename: str) -> None:
+ self.log_reload(filename)
+ sys.exit(3)
+
+ def log_reload(self, filename: str | bytes) -> None:
+ filename = os.path.abspath(filename)
+ _log("info", f" * Detected change in {filename!r}, reloading")
+
+
+class StatReloaderLoop(ReloaderLoop):
+ name = "stat"
+
+ def __enter__(self) -> ReloaderLoop:
+ self.mtimes: dict[str, float] = {}
+ return super().__enter__()
+
+ def run_step(self) -> None:
+ for name in _find_stat_paths(self.extra_files, self.exclude_patterns):
+ try:
+ mtime = os.stat(name).st_mtime
+ except OSError:
+ continue
+
+ old_time = self.mtimes.get(name)
+
+ if old_time is None:
+ self.mtimes[name] = mtime
+ continue
+
+ if mtime > old_time:
+ self.trigger_reload(name)
+
+
+class WatchdogReloaderLoop(ReloaderLoop):
+ def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
+ from watchdog.events import EVENT_TYPE_CLOSED
+ from watchdog.events import EVENT_TYPE_CREATED
+ from watchdog.events import EVENT_TYPE_DELETED
+ from watchdog.events import EVENT_TYPE_MODIFIED
+ from watchdog.events import EVENT_TYPE_MOVED
+ from watchdog.events import FileModifiedEvent
+ from watchdog.events import PatternMatchingEventHandler
+ from watchdog.observers import Observer
+
+ super().__init__(*args, **kwargs)
+ trigger_reload = self.trigger_reload
+
+ class EventHandler(PatternMatchingEventHandler):
+ def on_any_event(self, event: FileModifiedEvent): # type: ignore
+ if event.event_type not in {
+ EVENT_TYPE_CLOSED,
+ EVENT_TYPE_CREATED,
+ EVENT_TYPE_DELETED,
+ EVENT_TYPE_MODIFIED,
+ EVENT_TYPE_MOVED,
+ }:
+ # skip events that don't involve changes to the file
+ return
+
+ trigger_reload(event.src_path)
+
+ reloader_name = Observer.__name__.lower() # type: ignore[attr-defined]
+
+ if reloader_name.endswith("observer"):
+ reloader_name = reloader_name[:-8]
+
+ self.name = f"watchdog ({reloader_name})"
+ self.observer = Observer()
+ # Extra patterns can be non-Python files, match them in addition
+ # to all Python files in default and extra directories. Ignore
+ # __pycache__ since a change there will always have a change to
+ # the source file (or initial pyc file) as well. Ignore Git and
+ # Mercurial internal changes.
+ extra_patterns = [p for p in self.extra_files if not os.path.isdir(p)]
+ self.event_handler = EventHandler(
+ patterns=["*.py", "*.pyc", "*.zip", *extra_patterns],
+ ignore_patterns=[
+ *[f"*/{d}/*" for d in _ignore_common_dirs],
+ *self.exclude_patterns,
+ ],
+ )
+ self.should_reload = False
+
+ def trigger_reload(self, filename: str | bytes) -> None:
+ # This is called inside an event handler, which means throwing
+ # SystemExit has no effect.
+ # https://github.com/gorakhargosh/watchdog/issues/294
+ self.should_reload = True
+ self.log_reload(filename)
+
+ def __enter__(self) -> ReloaderLoop:
+ self.watches: dict[str, t.Any] = {}
+ self.observer.start()
+ return super().__enter__()
+
+ def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore
+ self.observer.stop()
+ self.observer.join()
+
+ def run(self) -> None:
+ while not self.should_reload:
+ self.run_step()
+ time.sleep(self.interval)
+
+ sys.exit(3)
+
+ def run_step(self) -> None:
+ to_delete = set(self.watches)
+
+ for path in _find_watchdog_paths(self.extra_files, self.exclude_patterns):
+ if path not in self.watches:
+ try:
+ self.watches[path] = self.observer.schedule(
+ self.event_handler, path, recursive=True
+ )
+ except OSError:
+ # Clear this path from list of watches We don't want
+ # the same error message showing again in the next
+ # iteration.
+ self.watches[path] = None
+
+ to_delete.discard(path)
+
+ for path in to_delete:
+ watch = self.watches.pop(path, None)
+
+ if watch is not None:
+ self.observer.unschedule(watch)
+
+
+reloader_loops: dict[str, type[ReloaderLoop]] = {
+ "stat": StatReloaderLoop,
+ "watchdog": WatchdogReloaderLoop,
+}
+
+try:
+ __import__("watchdog.observers")
+except ImportError:
+ reloader_loops["auto"] = reloader_loops["stat"]
+else:
+ reloader_loops["auto"] = reloader_loops["watchdog"]
+
+
+def ensure_echo_on() -> None:
+ """Ensure that echo mode is enabled. Some tools such as PDB disable
+ it which causes usability issues after a reload."""
+ # tcgetattr will fail if stdin isn't a tty
+ if sys.stdin is None or not sys.stdin.isatty():
+ return
+
+ try:
+ import termios
+ except ImportError:
+ return
+
+ attributes = termios.tcgetattr(sys.stdin)
+
+ if not attributes[3] & termios.ECHO:
+ attributes[3] |= termios.ECHO
+ termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)
+
+
+def run_with_reloader(
+ main_func: t.Callable[[], None],
+ extra_files: t.Iterable[str] | None = None,
+ exclude_patterns: t.Iterable[str] | None = None,
+ interval: int | float = 1,
+ reloader_type: str = "auto",
+) -> None:
+ """Run the given function in an independent Python interpreter."""
+ import signal
+
+ signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
+ reloader = reloader_loops[reloader_type](
+ extra_files=extra_files, exclude_patterns=exclude_patterns, interval=interval
+ )
+
+ try:
+ if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
+ ensure_echo_on()
+ t = threading.Thread(target=main_func, args=())
+ t.daemon = True
+
+ # Enter the reloader to set up initial state, then start
+ # the app thread and reloader update loop.
+ with reloader:
+ t.start()
+ reloader.run()
+ else:
+ sys.exit(reloader.restart_with_reloader())
+ except KeyboardInterrupt:
+ pass
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/__init__.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/__init__.py
new file mode 100644
index 0000000..846ffce
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/__init__.py
@@ -0,0 +1,34 @@
+from .accept import Accept as Accept
+from .accept import CharsetAccept as CharsetAccept
+from .accept import LanguageAccept as LanguageAccept
+from .accept import MIMEAccept as MIMEAccept
+from .auth import Authorization as Authorization
+from .auth import WWWAuthenticate as WWWAuthenticate
+from .cache_control import RequestCacheControl as RequestCacheControl
+from .cache_control import ResponseCacheControl as ResponseCacheControl
+from .csp import ContentSecurityPolicy as ContentSecurityPolicy
+from .etag import ETags as ETags
+from .file_storage import FileMultiDict as FileMultiDict
+from .file_storage import FileStorage as FileStorage
+from .headers import EnvironHeaders as EnvironHeaders
+from .headers import Headers as Headers
+from .mixins import ImmutableDictMixin as ImmutableDictMixin
+from .mixins import ImmutableHeadersMixin as ImmutableHeadersMixin
+from .mixins import ImmutableListMixin as ImmutableListMixin
+from .mixins import ImmutableMultiDictMixin as ImmutableMultiDictMixin
+from .mixins import UpdateDictMixin as UpdateDictMixin
+from .range import ContentRange as ContentRange
+from .range import IfRange as IfRange
+from .range import Range as Range
+from .structures import CallbackDict as CallbackDict
+from .structures import CombinedMultiDict as CombinedMultiDict
+from .structures import HeaderSet as HeaderSet
+from .structures import ImmutableDict as ImmutableDict
+from .structures import ImmutableList as ImmutableList
+from .structures import ImmutableMultiDict as ImmutableMultiDict
+from .structures import ImmutableOrderedMultiDict as ImmutableOrderedMultiDict
+from .structures import ImmutableTypeConversionDict as ImmutableTypeConversionDict
+from .structures import iter_multi_items as iter_multi_items
+from .structures import MultiDict as MultiDict
+from .structures import OrderedMultiDict as OrderedMultiDict
+from .structures import TypeConversionDict as TypeConversionDict
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/accept.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/accept.py
new file mode 100644
index 0000000..d80f0bb
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/accept.py
@@ -0,0 +1,326 @@
+from __future__ import annotations
+
+import codecs
+import re
+
+from .structures import ImmutableList
+
+
class Accept(ImmutableList):
    """A list subclass holding ``(value, quality)`` tuples parsed from an
    ``Accept``-style header, kept sorted by specificity and quality in
    descending order.

    The list API is extended with header-aware helpers: containment and
    lookup are normalized according to the rules of the header rather
    than plain string equality.

    >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
    >>> a.best
    'ISO-8859-1'
    >>> 'iso-8859-1' in a
    True
    >>> 'UTF8' in a
    True
    >>> 'utf7' in a
    False

    String item lookup returns the quality, or ``0`` for unknown values:

    >>> a['utf-8']
    0.7
    >>> a['utf7']
    0

    .. versionchanged:: 0.5
        :class:`Accept` objects are forced immutable now.

    .. versionchanged:: 1.0.0
        Entries with equal quality keep their initial order instead of
        being sorted alphabetically.
    """

    def __init__(self, values=()):
        if values is None:
            # No header was present at all.
            list.__init__(self)
            self.provided = False
        elif isinstance(values, Accept):
            # Copying an existing instance: data is already sorted.
            self.provided = values.provided
            list.__init__(self, values)
        else:
            self.provided = True
            ordered = sorted(
                values,
                key=lambda pair: (self._specificity(pair[0]), pair[1]),
                reverse=True,
            )
            list.__init__(self, ordered)

    def _specificity(self, value):
        """Return a tuple describing how specific ``value`` is."""
        return (value != "*",)

    def _value_matches(self, value, item):
        """Return True when ``value`` satisfies the accepted ``item``."""
        if item == "*":
            return True
        return item.lower() == value.lower()

    def __getitem__(self, key):
        """Integer/slice keys index the list as usual; a string key
        returns the quality for that value (``0`` when absent).
        """
        if not isinstance(key, str):
            return list.__getitem__(self, key)
        return self.quality(key)

    def quality(self, key):
        """Return the quality of ``key``, or ``0`` if it is not accepted.

        .. versionadded:: 0.6
            Previously only the item-lookup syntax (``obj[key]``) was
            available.
        """
        return next(
            (quality for item, quality in self if self._value_matches(key, item)), 0
        )

    def __contains__(self, value):
        return any(self._value_matches(value, item) for item, _quality in self)

    def __repr__(self):
        inner = ", ".join(f"({value!r}, {quality})" for value, quality in self)
        return f"{type(self).__name__}([{inner}])"

    def index(self, key):
        """Return the position of an entry or raise :exc:`ValueError`.

        :param key: The key to be looked up.

        .. versionchanged:: 0.5
            Raises :exc:`ValueError` instead of :exc:`IndexError` for
            consistency with the list API.
        """
        if not isinstance(key, str):
            return list.index(self, key)
        for position, (item, _quality) in enumerate(self):
            if self._value_matches(key, item):
                return position
        raise ValueError(key)

    def find(self, key):
        """Return the position of an entry, or ``-1`` when not found.

        :param key: The key to be looked up.
        """
        try:
            return self.index(key)
        except ValueError:
            return -1

    def values(self):
        """Iterate over all accepted values, ignoring the qualities."""
        for value, _quality in self:
            yield value

    def to_header(self):
        """Serialize back into an HTTP header value."""
        parts = []
        for value, quality in self:
            parts.append(value if quality == 1 else f"{value};q={quality}")
        return ",".join(parts)

    def __str__(self):
        return self.to_header()

    def _best_single_match(self, match):
        # Entries are sorted best-first, so the first hit is the best one.
        for client_item, quality in self:
            if self._value_matches(match, client_item):
                return client_item, quality
        return None

    def best_match(self, matches, default=None):
        """Return the entry of ``matches`` the client prefers most,
        judged by quality first and specificity second. On a full tie
        the earlier entry wins.

        :param matches: a list of matches to check for
        :param default: the value that is returned if none match
        """
        result = default
        best_quality = -1
        best_specificity = (-1,)
        for server_item in matches:
            found = self._best_single_match(server_item)
            if not found:
                continue
            client_item, quality = found
            specificity = self._specificity(client_item)
            # Skip unacceptable (q<=0) and strictly worse candidates.
            if quality <= 0 or quality < best_quality:
                continue
            # Better quality, or equal quality but more specific, wins.
            if quality > best_quality or specificity > best_specificity:
                result = server_item
                best_quality = quality
                best_specificity = specificity
        return result

    @property
    def best(self):
        """The single most preferred value, or ``None`` when empty."""
        if self:
            return self[0][0]
+
+
+_mime_split_re = re.compile(r"/|(?:\s*;\s*)")
+
+
+def _normalize_mime(value):
+ return _mime_split_re.split(value.lower())
+
+
class MIMEAccept(Accept):
    """An :class:`Accept` specialized with mimetype matching semantics."""

    def _specificity(self, value):
        # Each non-wildcard segment (type, subtype, parameters) adds one
        # level of specificity.
        return tuple(segment != "*" for segment in _mime_split_re.split(value))

    def _value_matches(self, value, item):
        # ``item`` comes from the client; invalid entries can never match.
        if "/" not in item:
            return False

        # ``value`` comes from the application; complain loudly when it
        # does not look like a mimetype.
        if "/" not in value:
            raise ValueError(f"invalid mimetype {value!r}")

        normalized_value = _normalize_mime(value)
        value_type, value_subtype = normalized_value[:2]
        value_params = sorted(normalized_value[2:])

        # "*" as the type is only valid in the full wildcard "*/*".
        if value_type == "*" and value_subtype != "*":
            raise ValueError(f"invalid mimetype {value!r}")

        normalized_item = _normalize_mime(item)
        item_type, item_subtype = normalized_item[:2]
        item_params = sorted(normalized_item[2:])

        # "*/not-*" from the client is invalid, can't match.
        if item_type == "*" and item_subtype != "*":
            return False

        # A full wildcard on either side matches anything.
        if item_type == "*" and item_subtype == "*":
            return True
        if value_type == "*" and value_subtype == "*":
            return True

        if item_type != value_type:
            return False
        if item_subtype == "*" or value_subtype == "*":
            return True
        return item_subtype == value_subtype and item_params == value_params

    @property
    def accept_html(self):
        """True if this object accepts HTML."""
        return (
            "text/html" in self
            or "application/xhtml+xml" in self
            or self.accept_xhtml
        )

    @property
    def accept_xhtml(self):
        """True if this object accepts XHTML."""
        return "application/xhtml+xml" in self or "application/xml" in self

    @property
    def accept_json(self):
        """True if this object accepts JSON."""
        return "application/json" in self
+
+
+_locale_delim_re = re.compile(r"[_-]")
+
+
+def _normalize_lang(value):
+ """Process a language tag for matching."""
+ return _locale_delim_re.split(value.lower())
+
+
class LanguageAccept(Accept):
    """An :class:`Accept` that normalizes language tags when matching."""

    def _value_matches(self, value, item):
        if item == "*":
            return True
        return _normalize_lang(value) == _normalize_lang(item)

    def best_match(self, matches, default=None):
        """Given a list of supported values, return the best match from
        the accepted values. Tags are normalized for matching but are
        returned unchanged.

        When no exact match exists, matching is retried using only the
        primary language subtag — first on the accepted values, then on
        the supported values. No other subtags are partially matched.

        :param matches: A list of supported languages to find a match.
        :param default: The value that is returned if none match.
        """
        # Pass 1: exact normalized match, e.g. accepted "en-US" against a
        # supported "en-US".
        exact = super().best_match(matches)
        if exact is not None:
            return exact

        # Pass 2: shorten the accepted tags to their primary subtag so
        # accepting "en-US" can match a supported "en". The regex split
        # handles both "-" and "_" separators and 2/3 letter codes.
        shortened_accept = Accept(
            [(_locale_delim_re.split(entry[0], 1)[0], entry[1]) for entry in self]
        )
        partial = shortened_accept.best_match(matches)
        if partial is not None:
            return partial

        # Pass 3: shorten the supported tags instead, so accepting "en"
        # can match a supported "en-US".
        shortened_matches = [_locale_delim_re.split(entry, 1)[0] for entry in matches]
        primary = super().best_match(shortened_matches)
        if primary is not None:
            # Map back to the first original supported value whose tag
            # starts with the matched primary subtag.
            return next(entry for entry in matches if entry.startswith(primary))

        return default
+
+
class CharsetAccept(Accept):
    """An :class:`Accept` that normalizes charset names for matching."""

    def _value_matches(self, value, item):
        def normalize(name):
            # Resolve aliases to the codec's canonical name; unknown
            # charsets fall back to a plain lowercase comparison.
            try:
                return codecs.lookup(name).name
            except LookupError:
                return name.lower()

        if item == "*":
            return True
        return normalize(value) == normalize(item)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/accept.pyi b/venv/lib/python3.8/site-packages/werkzeug/datastructures/accept.pyi
new file mode 100644
index 0000000..4b74dd9
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/accept.pyi
@@ -0,0 +1,54 @@
+from collections.abc import Iterable
+from collections.abc import Iterator
+from typing import overload
+
+from .structures import ImmutableList
+
# Typing stub mirroring the runtime ``Accept`` class in ``accept.py``: an
# immutable list of ``(value, quality)`` tuples with header-aware lookup.
class Accept(ImmutableList[tuple[str, int]]):
    # True when the header was present in the request.
    provided: bool
    def __init__(
        self, values: Accept | Iterable[tuple[str, float]] | None = None
    ) -> None: ...
    def _specificity(self, value: str) -> tuple[bool, ...]: ...
    def _value_matches(self, value: str, item: str) -> bool: ...
    # String keys return the quality; int/slice keys index the list.
    @overload  # type: ignore
    def __getitem__(self, key: str) -> int: ...
    @overload
    def __getitem__(self, key: int) -> tuple[str, int]: ...
    @overload
    def __getitem__(self, key: slice) -> Iterable[tuple[str, int]]: ...
    def quality(self, key: str) -> int: ...
    def __contains__(self, value: str) -> bool: ...  # type: ignore
    def index(self, key: str) -> int: ...  # type: ignore
    def find(self, key: str) -> int: ...
    def values(self) -> Iterator[str]: ...
    def to_header(self) -> str: ...
    def _best_single_match(self, match: str) -> tuple[str, int] | None: ...
    # With an explicit string default the result is never None.
    @overload
    def best_match(self, matches: Iterable[str], default: str) -> str: ...
    @overload
    def best_match(
        self, matches: Iterable[str], default: str | None = None
    ) -> str | None: ...
    @property
    def best(self) -> str: ...
+
+def _normalize_mime(value: str) -> list[str]: ...
+
# Stub for the mimetype-aware Accept subclass.
class MIMEAccept(Accept):
    def _specificity(self, value: str) -> tuple[bool, ...]: ...
    def _value_matches(self, value: str, item: str) -> bool: ...
    @property
    def accept_html(self) -> bool: ...
    @property
    def accept_xhtml(self) -> bool: ...
    @property
    def accept_json(self) -> bool: ...
+
+def _normalize_lang(value: str) -> list[str]: ...
+
# Stub for the language-tag-aware Accept subclass.
class LanguageAccept(Accept):
    def _value_matches(self, value: str, item: str) -> bool: ...
+
# Stub for the charset-normalizing Accept subclass.
class CharsetAccept(Accept):
    def _value_matches(self, value: str, item: str) -> bool: ...
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/auth.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/auth.py
new file mode 100644
index 0000000..a3ca0de
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/auth.py
@@ -0,0 +1,316 @@
+from __future__ import annotations
+
+import base64
+import binascii
+import typing as t
+
+from ..http import dump_header
+from ..http import parse_dict_header
+from ..http import quote_header_value
+from .structures import CallbackDict
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+
class Authorization:
    """Represents the parts of an ``Authorization`` request header.

    :attr:`.Request.authorization` returns an instance if the header is set.

    An instance can also be passed as the ``auth`` parameter of the test
    :class:`.Client` request methods to send the header in test requests.

    Depending on the auth scheme, either :attr:`parameters` or :attr:`token`
    will be set. The ``Basic`` scheme's token is decoded into the
    ``username`` and ``password`` parameters.

    For convenience, ``auth["key"]``, ``auth.key``, ``auth.get("key")``, and
    ``"key" in auth`` all read from the :attr:`parameters` dict.

    .. versionchanged:: 2.3
        The ``token`` parameter and attribute was added to support auth
        schemes that use a token instead of parameters, such as ``Bearer``.

    .. versionchanged:: 2.3
        The object is no longer a ``dict``.

    .. versionchanged:: 0.5
        The object is an immutable dict.
    """

    def __init__(
        self,
        auth_type: str,
        data: dict[str, str | None] | None = None,
        token: str | None = None,
    ) -> None:
        # The authorization scheme, like ``basic``, ``digest``, or ``bearer``.
        self.type = auth_type

        # Parameters parsed from the header. Either this or ``token`` has a
        # value for a given scheme.
        self.parameters = {} if data is None else data

        # Raw token from the header, for schemes such as ``Bearer``.
        # Either this or ``parameters`` has a value. Added in 2.3.
        self.token = token

    def __getattr__(self, name: str) -> str | None:
        # Unknown attributes fall through to the parameters dict.
        return self.parameters.get(name)

    def __getitem__(self, name: str) -> str | None:
        return self.parameters.get(name)

    def get(self, key: str, default: str | None = None) -> str | None:
        return self.parameters.get(key, default)

    def __contains__(self, key: str) -> bool:
        return key in self.parameters

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Authorization):
            return NotImplemented

        return (self.type, self.token, self.parameters) == (
            other.type,
            other.token,
            other.parameters,
        )

    @classmethod
    def from_header(cls, value: str | None) -> te.Self | None:
        """Parse an ``Authorization`` header value and return an instance, or
        ``None`` if the value is empty or a ``Basic`` token is malformed.

        :param value: The header value to parse.

        .. versionadded:: 2.3
        """
        if not value:
            return None

        scheme, _, rest = value.partition(" ")
        scheme = scheme.lower()
        rest = rest.strip()

        if scheme == "basic":
            try:
                decoded = base64.b64decode(rest).decode()
            except (binascii.Error, UnicodeError):
                # Undecodable credentials: treat as no authorization.
                return None

            username, _, password = decoded.partition(":")
            return cls(scheme, {"username": username, "password": password})

        if "=" in rest.rstrip("="):
            # A non-trailing "=" means key=value parameters.
            return cls(scheme, parse_dict_header(rest), None)

        # No "=" (or only trailing padding "=") means an opaque token.
        return cls(scheme, None, rest)

    def to_header(self) -> str:
        """Produce an ``Authorization`` header value representing this data.

        .. versionadded:: 2.0
        """
        if self.type == "basic":
            credentials = f"{self.username}:{self.password}".encode()
            encoded = base64.b64encode(credentials).decode("ascii")
            return f"Basic {encoded}"

        if self.token is not None:
            return f"{self.type.title()} {self.token}"

        return f"{self.type.title()} {dump_header(self.parameters)}"

    def __str__(self) -> str:
        return self.to_header()

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.to_header()}>"
+
+
class WWWAuthenticate:
    """Represents the parts of a ``WWW-Authenticate`` response header.

    Set :attr:`.Response.www_authenticate` to an instance or list of instances to set
    values for this header in the response. Modifying this instance will modify the
    header value.

    Depending on the auth scheme, either :attr:`parameters` or :attr:`token` should be
    set. The ``Basic`` scheme will encode ``username`` and ``password`` parameters to a
    token.

    For convenience, ``auth["key"]`` and ``auth.key`` both act on the :attr:`parameters`
    dict, and can be used to get, set, or delete parameters. ``auth.get("key")`` and
    ``"key" in auth`` are also provided.

    .. versionchanged:: 2.3
        The ``token`` parameter and attribute was added to support auth schemes that use
        a token instead of parameters, such as ``Bearer``.

    .. versionchanged:: 2.3
        The object is no longer a ``dict``.

    .. versionchanged:: 2.3
        The ``on_update`` parameter was removed.
    """

    def __init__(
        self,
        auth_type: str,
        values: dict[str, str | None] | None = None,
        token: str | None = None,
    ):
        self._type = auth_type.lower()
        # Wrap the parameters in a CallbackDict so that mutating the dict
        # in place also fires the registered update hook.
        self._parameters: dict[str, str | None] = CallbackDict(
            values, lambda _: self._trigger_on_update()
        )
        self._token = token
        # Assigned externally; called with ``self`` whenever the header
        # data changes so the serialized header can be kept in sync.
        self._on_update: t.Callable[[WWWAuthenticate], None] | None = None

    def _trigger_on_update(self) -> None:
        # Notify the registered listener, if any, that the value changed.
        if self._on_update is not None:
            self._on_update(self)

    @property
    def type(self) -> str:
        """The authorization scheme, like ``basic``, ``digest``, or ``bearer``."""
        return self._type

    @type.setter
    def type(self, value: str) -> None:
        # NOTE(review): unlike ``__init__``, this does not lowercase the
        # value — confirm whether that asymmetry is intended.
        self._type = value
        self._trigger_on_update()

    @property
    def parameters(self) -> dict[str, str | None]:
        """A dict of parameters for the header. Only one of this or :attr:`token` should
        have a value for a given scheme.
        """
        return self._parameters

    @parameters.setter
    def parameters(self, value: dict[str, str]) -> None:
        # Re-wrap the new dict so future in-place mutation still notifies.
        self._parameters = CallbackDict(value, lambda _: self._trigger_on_update())
        self._trigger_on_update()

    @property
    def token(self) -> str | None:
        """A token for the header. Only one of this or :attr:`parameters` should
        have a value for a given scheme.

        .. versionadded:: 2.3
        """
        return self._token

    @token.setter
    def token(self, value: str | None) -> None:
        """A token for the header. Only one of this or :attr:`parameters` should have a
        value for a given scheme.

        .. versionadded:: 2.3
        """
        self._token = value
        self._trigger_on_update()

    def __getitem__(self, key: str) -> str | None:
        return self.parameters.get(key)

    def __setitem__(self, key: str, value: str | None) -> None:
        if value is None:
            # Assigning None removes the parameter instead of storing it.
            if key in self.parameters:
                del self.parameters[key]
        else:
            self.parameters[key] = value

        # NOTE(review): the CallbackDict mutations above presumably fire
        # the hook already, making this explicit trigger redundant — confirm.
        self._trigger_on_update()

    def __delitem__(self, key: str) -> None:
        if key in self.parameters:
            del self.parameters[key]
            self._trigger_on_update()

    def __getattr__(self, name: str) -> str | None:
        # Unknown attributes fall through to the parameters dict.
        return self[name]

    def __setattr__(self, name: str, value: str | None) -> None:
        # Internal slots are stored normally; everything else becomes a
        # header parameter via ``__setitem__``.
        if name in {"_type", "_parameters", "_token", "_on_update"}:
            super().__setattr__(name, value)
        else:
            self[name] = value

    def __delattr__(self, name: str) -> None:
        del self[name]

    def __contains__(self, key: str) -> bool:
        return key in self.parameters

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, WWWAuthenticate):
            return NotImplemented

        return (
            other.type == self.type
            and other.token == self.token
            and other.parameters == self.parameters
        )

    def get(self, key: str, default: str | None = None) -> str | None:
        return self.parameters.get(key, default)

    @classmethod
    def from_header(cls, value: str | None) -> te.Self | None:
        """Parse a ``WWW-Authenticate`` header value and return an instance, or ``None``
        if the value is empty.

        :param value: The header value to parse.

        .. versionadded:: 2.3
        """
        if not value:
            return None

        scheme, _, rest = value.partition(" ")
        scheme = scheme.lower()
        rest = rest.strip()

        if "=" in rest.rstrip("="):
            # = that is not trailing, this is parameters.
            return cls(scheme, parse_dict_header(rest), None)

        # No = or only trailing =, this is a token.
        return cls(scheme, None, rest)

    def to_header(self) -> str:
        """Produce a ``WWW-Authenticate`` header value representing this data."""
        if self.token is not None:
            return f"{self.type.title()} {self.token}"

        if self.type == "digest":
            items = []

            for key, value in self.parameters.items():
                # These Digest fields are forced into quoted form
                # (``allow_token=False``); others use default quoting.
                if key in {"realm", "domain", "nonce", "opaque", "qop"}:
                    value = quote_header_value(value, allow_token=False)
                else:
                    value = quote_header_value(value)

                items.append(f"{key}={value}")

            return f"Digest {', '.join(items)}"

        return f"{self.type.title()} {dump_header(self.parameters)}"

    def __str__(self) -> str:
        return self.to_header()

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.to_header()}>"
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/cache_control.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/cache_control.py
new file mode 100644
index 0000000..bff4c18
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/cache_control.py
@@ -0,0 +1,175 @@
+from __future__ import annotations
+
+from .mixins import ImmutableDictMixin
+from .mixins import UpdateDictMixin
+
+
def cache_control_property(key, empty, type):
    """Return a property that proxies the cache directive ``key`` through
    the host object's ``_get_cache_value`` / ``_set_cache_value`` /
    ``_del_cache_value`` helpers. Useful if you want to add support for a
    cache extension in a subclass.

    .. versionchanged:: 2.0
        Renamed from ``cache_property``.
    """

    def fget(self):
        return self._get_cache_value(key, empty, type)

    def fset(self, value):
        self._set_cache_value(key, value, type)

    def fdel(self):
        self._del_cache_value(key)

    return property(fget, fset, fdel, f"accessor for {key!r}")
+
+
class _CacheControl(UpdateDictMixin, dict):
    """Base dict for ``Cache-Control`` header values, with attribute-style
    accessors for the directives specified in RFC 2616. Request and
    response directives are not distinguished here; see the subclasses.

    Because the directives in the HTTP header use dashes, the Python
    descriptors use underscores instead.

    Convert back to a header with :meth:`to_header` or ``str()``. Have a
    look at the sourcecode if you plan to subclass and add your own
    directives.

    .. versionchanged:: 2.1.0
        Setting int properties such as ``max_age`` will convert the
        value to an int.

    .. versionchanged:: 0.4
        Setting `no_cache` or `private` to boolean `True` will set the
        implicit none-value which is ``*``; setting them to ``None``
        removes them again. In versions before 0.5 this behavior affected
        the now no longer existing `CacheControl` class.
    """

    no_cache = cache_control_property("no-cache", "*", None)
    no_store = cache_control_property("no-store", None, bool)
    max_age = cache_control_property("max-age", -1, int)
    no_transform = cache_control_property("no-transform", None, None)

    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        self.on_update = on_update
        # ``provided`` is False only when no header was present at all
        # (``values=None``), not for an empty header.
        self.provided = values is not None

    def _get_cache_value(self, key, empty, type):
        """Fetch and coerce a directive value (property getter helper)."""
        if type is bool:
            return key in self
        if key not in self:
            return None
        value = self[key]
        if value is None:
            # Directive present without a value, e.g. a bare ``no-cache``.
            return empty
        if type is not None:
            try:
                value = type(value)
            except ValueError:
                # Leave values that can't be coerced untouched.
                pass
        return value

    def _set_cache_value(self, key, value, type):
        """Store or remove a directive value (property setter helper)."""
        if type is bool:
            if value:
                self[key] = None
            else:
                self.pop(key, None)
            return
        if value is None:
            self.pop(key, None)
        elif value is True:
            # True means "directive present without a value".
            self[key] = None
        elif type is not None:
            self[key] = type(value)
        else:
            self[key] = value

    def _del_cache_value(self, key):
        """Remove a directive (property deleter helper)."""
        if key in self:
            del self[key]

    def to_header(self):
        """Serialize the stored directives into a cache control header."""
        return http.dump_header(self)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        directives = " ".join(f"{k}={v!r}" for k, v in sorted(self.items()))
        return f"<{type(self).__name__} {directives}>"

    # Backwards-compatible alias for defining extension directives.
    cache_property = staticmethod(cache_control_property)
+
+
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
    """The immutable, request-side ``Cache-Control`` header. It exposes
    all the request-relevant cache control directives.

    Convert back to a header with :meth:`to_header` or ``str()``. Have a
    look at the sourcecode if you plan to subclass and add your own
    directives.

    .. versionchanged:: 2.1.0
        Setting int properties such as ``max_age`` will convert the
        value to an int.

    .. versionadded:: 0.5
        Previously a single `CacheControl` class was used for both
        requests and responses.
    """

    max_stale = cache_control_property("max-stale", "*", int)
    min_fresh = cache_control_property("min-fresh", "*", int)
    only_if_cached = cache_control_property("only-if-cached", None, bool)
+
+
class ResponseCacheControl(_CacheControl):
    """The mutable, response-side ``Cache-Control`` header. Unlike
    :class:`RequestCacheControl` it can be modified and exposes all the
    response-relevant cache control directives.

    Convert back to a header with :meth:`to_header` or ``str()``. Have a
    look at the sourcecode if you plan to subclass and add your own
    directives.

    .. versionchanged:: 2.1.1
        ``s_maxage`` converts the value to an int.

    .. versionchanged:: 2.1.0
        Setting int properties such as ``max_age`` will convert the
        value to an int.

    .. versionadded:: 0.5
        Previously a single `CacheControl` class was used for both
        requests and responses.
    """

    public = cache_control_property("public", None, bool)
    private = cache_control_property("private", "*", None)
    must_revalidate = cache_control_property("must-revalidate", None, bool)
    proxy_revalidate = cache_control_property("proxy-revalidate", None, bool)
    s_maxage = cache_control_property("s-maxage", None, int)
    immutable = cache_control_property("immutable", None, bool)
+
+
+# circular dependencies
+from .. import http
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/cache_control.pyi b/venv/lib/python3.8/site-packages/werkzeug/datastructures/cache_control.pyi
new file mode 100644
index 0000000..54ec020
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/cache_control.pyi
@@ -0,0 +1,115 @@
+from collections.abc import Callable
+from collections.abc import Iterable
+from collections.abc import Mapping
+from typing import TypeVar
+
+from .mixins import ImmutableDictMixin
+from .mixins import UpdateDictMixin
+
# Generic helpers for the directive accessor stubs below.
T = TypeVar("T")
_CPT = TypeVar("_CPT", str, int, bool)

# Stub for the factory producing the dash-named directive properties.
def cache_control_property(
    key: str, empty: _CPT | None, type: type[_CPT]
) -> property: ...
+
# Typing stub mirroring the runtime ``_CacheControl`` dict subclass in
# ``cache_control.py``.
class _CacheControl(
    UpdateDictMixin[str, str | int | bool | None], dict[str, str | int | bool | None]
):
    # True when the header was present in the message.
    provided: bool
    def __init__(
        self,
        values: Mapping[str, str | int | bool | None]
        | Iterable[tuple[str, str | int | bool | None]] = (),
        on_update: Callable[[_CacheControl], None] | None = None,
    ) -> None: ...
    @property
    def no_cache(self) -> bool | None: ...
    @no_cache.setter
    def no_cache(self, value: bool | None) -> None: ...
    @no_cache.deleter
    def no_cache(self) -> None: ...
    @property
    def no_store(self) -> bool | None: ...
    @no_store.setter
    def no_store(self, value: bool | None) -> None: ...
    @no_store.deleter
    def no_store(self) -> None: ...
    @property
    def max_age(self) -> int | None: ...
    @max_age.setter
    def max_age(self, value: int | None) -> None: ...
    @max_age.deleter
    def max_age(self) -> None: ...
    @property
    def no_transform(self) -> bool | None: ...
    @no_transform.setter
    def no_transform(self, value: bool | None) -> None: ...
    @no_transform.deleter
    def no_transform(self) -> None: ...
    # Internal helpers backing the directive properties.
    def _get_cache_value(self, key: str, empty: T | None, type: type[T]) -> T: ...
    def _set_cache_value(self, key: str, value: T | None, type: type[T]) -> None: ...
    def _del_cache_value(self, key: str) -> None: ...
    def to_header(self) -> str: ...
    @staticmethod
    def cache_property(key: str, empty: _CPT | None, type: type[_CPT]) -> property: ...
+
# Stub for the immutable request-side Cache-Control header.
class RequestCacheControl(  # type: ignore[misc]
    ImmutableDictMixin[str, str | int | bool | None], _CacheControl
):
    @property
    def max_stale(self) -> int | None: ...
    @max_stale.setter
    def max_stale(self, value: int | None) -> None: ...
    @max_stale.deleter
    def max_stale(self) -> None: ...
    @property
    def min_fresh(self) -> int | None: ...
    @min_fresh.setter
    def min_fresh(self, value: int | None) -> None: ...
    @min_fresh.deleter
    def min_fresh(self) -> None: ...
    @property
    def only_if_cached(self) -> bool | None: ...
    @only_if_cached.setter
    def only_if_cached(self, value: bool | None) -> None: ...
    @only_if_cached.deleter
    def only_if_cached(self) -> None: ...
+
# Stub for the mutable response-side Cache-Control header.
class ResponseCacheControl(_CacheControl):
    @property
    def public(self) -> bool | None: ...
    @public.setter
    def public(self, value: bool | None) -> None: ...
    @public.deleter
    def public(self) -> None: ...
    @property
    def private(self) -> bool | None: ...
    @private.setter
    def private(self, value: bool | None) -> None: ...
    @private.deleter
    def private(self) -> None: ...
    @property
    def must_revalidate(self) -> bool | None: ...
    @must_revalidate.setter
    def must_revalidate(self, value: bool | None) -> None: ...
    @must_revalidate.deleter
    def must_revalidate(self) -> None: ...
    @property
    def proxy_revalidate(self) -> bool | None: ...
    @proxy_revalidate.setter
    def proxy_revalidate(self, value: bool | None) -> None: ...
    @proxy_revalidate.deleter
    def proxy_revalidate(self) -> None: ...
    @property
    def s_maxage(self) -> int | None: ...
    @s_maxage.setter
    def s_maxage(self, value: int | None) -> None: ...
    @s_maxage.deleter
    def s_maxage(self) -> None: ...
    @property
    def immutable(self) -> bool | None: ...
    @immutable.setter
    def immutable(self, value: bool | None) -> None: ...
    @immutable.deleter
    def immutable(self) -> None: ...
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/csp.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/csp.py
new file mode 100644
index 0000000..dde9414
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/csp.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+from .mixins import UpdateDictMixin
+
+
def csp_property(key):
    """Build a ``property`` that reads, writes, and deletes the content
    security policy directive stored under ``key``.  Useful if you want
    to add support for a csp extension in a subclass.
    """

    def fget(self):
        return self._get_value(key)

    def fset(self, value):
        self._set_value(key, value)

    def fdel(self):
        self._del_value(key)

    return property(fget, fset, fdel, f"accessor for {key!r}")
+
+
class ContentSecurityPolicy(UpdateDictMixin, dict):
    """Subclass of a dict that stores values for a Content Security Policy
    header. It has accessors for all the level 3 policies.

    Because the csp directives in the HTTP header use dashes the
    python descriptors use underscores for that.

    To get a header of the :class:`ContentSecurityPolicy` object again
    you can convert the object into a string or call the
    :meth:`to_header` method. If you plan to subclass it and add your
    own items have a look at the sourcecode for that class.

    .. versionadded:: 1.0.0
       Support for Content Security Policy headers was added.

    """

    # One accessor per CSP level 3 directive; the header name uses
    # dashes, the attribute uses underscores.
    base_uri = csp_property("base-uri")
    child_src = csp_property("child-src")
    connect_src = csp_property("connect-src")
    default_src = csp_property("default-src")
    font_src = csp_property("font-src")
    form_action = csp_property("form-action")
    frame_ancestors = csp_property("frame-ancestors")
    frame_src = csp_property("frame-src")
    img_src = csp_property("img-src")
    manifest_src = csp_property("manifest-src")
    media_src = csp_property("media-src")
    navigate_to = csp_property("navigate-to")
    object_src = csp_property("object-src")
    prefetch_src = csp_property("prefetch-src")
    plugin_types = csp_property("plugin-types")
    report_to = csp_property("report-to")
    report_uri = csp_property("report-uri")
    sandbox = csp_property("sandbox")
    script_src = csp_property("script-src")
    script_src_attr = csp_property("script-src-attr")
    script_src_elem = csp_property("script-src-elem")
    style_src = csp_property("style-src")
    style_src_attr = csp_property("style-src-attr")
    style_src_elem = csp_property("style-src-elem")
    worker_src = csp_property("worker-src")

    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        self.on_update = on_update
        # ``provided`` distinguishes an absent header from a present but
        # empty one.  With the default ``values=()`` this evaluates to
        # True — presumably callers pass ``values=None`` explicitly when
        # the header is missing; verify against callers.
        self.provided = values is not None

    def _get_value(self, key):
        """Used internally by the accessor properties."""
        return self.get(key)

    def _set_value(self, key, value):
        """Used internally by the accessor properties."""
        # Setting a directive to ``None`` removes it from the policy.
        if value is None:
            self.pop(key, None)
        else:
            self[key] = value

    def _del_value(self, key):
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]

    def to_header(self):
        """Convert the stored values into a Content Security Policy header."""
        from ..http import dump_csp_header

        return dump_csp_header(self)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        kv_str = " ".join(f"{k}={v!r}" for k, v in sorted(self.items()))
        return f"<{type(self).__name__} {kv_str}>"
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/csp.pyi b/venv/lib/python3.8/site-packages/werkzeug/datastructures/csp.pyi
new file mode 100644
index 0000000..f9e2ac0
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/csp.pyi
@@ -0,0 +1,169 @@
+from collections.abc import Callable
+from collections.abc import Iterable
+from collections.abc import Mapping
+
+from .mixins import UpdateDictMixin
+
# Module-level property factory; returns a ``property`` wired to the
# ``_get_value``/``_set_value``/``_del_value`` methods for ``key``.
def csp_property(key: str) -> property: ...
+
# Typed accessors for the CSP level 3 directives; each is a property
# with getter, setter, and deleter.  ``None`` means the directive is
# absent from the policy.
class ContentSecurityPolicy(UpdateDictMixin[str, str], dict[str, str]):
    @property
    def base_uri(self) -> str | None: ...
    @base_uri.setter
    def base_uri(self, value: str | None) -> None: ...
    @base_uri.deleter
    def base_uri(self) -> None: ...
    @property
    def child_src(self) -> str | None: ...
    @child_src.setter
    def child_src(self, value: str | None) -> None: ...
    @child_src.deleter
    def child_src(self) -> None: ...
    @property
    def connect_src(self) -> str | None: ...
    @connect_src.setter
    def connect_src(self, value: str | None) -> None: ...
    @connect_src.deleter
    def connect_src(self) -> None: ...
    @property
    def default_src(self) -> str | None: ...
    @default_src.setter
    def default_src(self, value: str | None) -> None: ...
    @default_src.deleter
    def default_src(self) -> None: ...
    @property
    def font_src(self) -> str | None: ...
    @font_src.setter
    def font_src(self, value: str | None) -> None: ...
    @font_src.deleter
    def font_src(self) -> None: ...
    @property
    def form_action(self) -> str | None: ...
    @form_action.setter
    def form_action(self, value: str | None) -> None: ...
    @form_action.deleter
    def form_action(self) -> None: ...
    @property
    def frame_ancestors(self) -> str | None: ...
    @frame_ancestors.setter
    def frame_ancestors(self, value: str | None) -> None: ...
    @frame_ancestors.deleter
    def frame_ancestors(self) -> None: ...
    @property
    def frame_src(self) -> str | None: ...
    @frame_src.setter
    def frame_src(self, value: str | None) -> None: ...
    @frame_src.deleter
    def frame_src(self) -> None: ...
    @property
    def img_src(self) -> str | None: ...
    @img_src.setter
    def img_src(self, value: str | None) -> None: ...
    @img_src.deleter
    def img_src(self) -> None: ...
    @property
    def manifest_src(self) -> str | None: ...
    @manifest_src.setter
    def manifest_src(self, value: str | None) -> None: ...
    @manifest_src.deleter
    def manifest_src(self) -> None: ...
    @property
    def media_src(self) -> str | None: ...
    @media_src.setter
    def media_src(self, value: str | None) -> None: ...
    @media_src.deleter
    def media_src(self) -> None: ...
    @property
    def navigate_to(self) -> str | None: ...
    @navigate_to.setter
    def navigate_to(self, value: str | None) -> None: ...
    @navigate_to.deleter
    def navigate_to(self) -> None: ...
    @property
    def object_src(self) -> str | None: ...
    @object_src.setter
    def object_src(self, value: str | None) -> None: ...
    @object_src.deleter
    def object_src(self) -> None: ...
    @property
    def prefetch_src(self) -> str | None: ...
    @prefetch_src.setter
    def prefetch_src(self, value: str | None) -> None: ...
    @prefetch_src.deleter
    def prefetch_src(self) -> None: ...
    @property
    def plugin_types(self) -> str | None: ...
    @plugin_types.setter
    def plugin_types(self, value: str | None) -> None: ...
    @plugin_types.deleter
    def plugin_types(self) -> None: ...
    @property
    def report_to(self) -> str | None: ...
    @report_to.setter
    def report_to(self, value: str | None) -> None: ...
    @report_to.deleter
    def report_to(self) -> None: ...
    @property
    def report_uri(self) -> str | None: ...
    @report_uri.setter
    def report_uri(self, value: str | None) -> None: ...
    @report_uri.deleter
    def report_uri(self) -> None: ...
    @property
    def sandbox(self) -> str | None: ...
    @sandbox.setter
    def sandbox(self, value: str | None) -> None: ...
    @sandbox.deleter
    def sandbox(self) -> None: ...
    @property
    def script_src(self) -> str | None: ...
    @script_src.setter
    def script_src(self, value: str | None) -> None: ...
    @script_src.deleter
    def script_src(self) -> None: ...
    @property
    def script_src_attr(self) -> str | None: ...
    @script_src_attr.setter
    def script_src_attr(self, value: str | None) -> None: ...
    @script_src_attr.deleter
    def script_src_attr(self) -> None: ...
    @property
    def script_src_elem(self) -> str | None: ...
    @script_src_elem.setter
    def script_src_elem(self, value: str | None) -> None: ...
    @script_src_elem.deleter
    def script_src_elem(self) -> None: ...
    @property
    def style_src(self) -> str | None: ...
    @style_src.setter
    def style_src(self, value: str | None) -> None: ...
    @style_src.deleter
    def style_src(self) -> None: ...
    @property
    def style_src_attr(self) -> str | None: ...
    @style_src_attr.setter
    def style_src_attr(self, value: str | None) -> None: ...
    @style_src_attr.deleter
    def style_src_attr(self) -> None: ...
    @property
    def style_src_elem(self) -> str | None: ...
    @style_src_elem.setter
    def style_src_elem(self, value: str | None) -> None: ...
    @style_src_elem.deleter
    def style_src_elem(self) -> None: ...
    @property
    def worker_src(self) -> str | None: ...
    @worker_src.setter
    def worker_src(self, value: str | None) -> None: ...
    @worker_src.deleter
    def worker_src(self) -> None: ...
    # True unless the constructor received ``values=None``.
    provided: bool
    def __init__(
        self,
        values: Mapping[str, str] | Iterable[tuple[str, str]] = (),
        on_update: Callable[[ContentSecurityPolicy], None] | None = None,
    ) -> None: ...
    def _get_value(self, key: str) -> str | None: ...
    # The implementation removes the key when ``value`` is ``None``.
    def _set_value(self, key: str, value: str | None) -> None: ...
    def _del_value(self, key: str) -> None: ...
    def to_header(self) -> str: ...
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/etag.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/etag.py
new file mode 100644
index 0000000..747d996
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/etag.py
@@ -0,0 +1,95 @@
+from __future__ import annotations
+
+from collections.abc import Collection
+
+
class ETags(Collection):
    """A collection of etags that supports membership tests against both
    strong and weak entity tags, including the ``*`` wildcard.
    """

    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
        # A ``*`` wildcard makes the explicit strong tags irrelevant, so
        # they are dropped in that case.
        if star_tag or not strong_etags:
            self._strong = frozenset()
        else:
            self._strong = frozenset(strong_etags)

        self._weak = frozenset(weak_etags or ())
        self.star_tag = star_tag

    def as_set(self, include_weak=False):
        """Convert the `ETags` object into a python set.  Per default all the
        weak etags are not part of this set."""
        tags = set(self._strong)
        if include_weak:
            tags |= self._weak
        return tags

    def is_weak(self, etag):
        """Check if an etag is weak."""
        return etag in self._weak

    def is_strong(self, etag):
        """Check if an etag is strong."""
        return etag in self._strong

    def contains_weak(self, etag):
        """Check if an etag is part of the set including weak and strong tags."""
        if self.is_weak(etag):
            return True
        return self.contains(etag)

    def contains(self, etag):
        """Check if an etag is part of the set ignoring weak tags.
        It is also possible to use the ``in`` operator.
        """
        return True if self.star_tag else self.is_strong(etag)

    def contains_raw(self, etag):
        """When passed a quoted tag it will check if this tag is part of the
        set.  If the tag is weak it is checked against weak and strong tags,
        otherwise strong only."""
        from ..http import unquote_etag

        value, weak = unquote_etag(etag)
        return self.contains_weak(value) if weak else self.contains(value)

    def to_header(self):
        """Convert the etags set into a HTTP header string."""
        if self.star_tag:
            return "*"
        parts = [f'"{tag}"' for tag in self._strong]
        parts += [f'W/"{tag}"' for tag in self._weak]
        return ", ".join(parts)

    def __call__(self, etag=None, data=None, include_weak=False):
        # Exactly one of ``etag`` and ``data`` must be given.
        if (etag is None) == (data is None):
            raise TypeError("either tag or data required, but at least one")
        if etag is None:
            from ..http import generate_etag

            etag = generate_etag(data)
        if include_weak and etag in self._weak:
            return True
        return etag in self._strong

    def __bool__(self):
        return bool(self.star_tag or self._strong or self._weak)

    def __str__(self):
        return self.to_header()

    def __len__(self):
        return len(self._strong)

    def __iter__(self):
        return iter(self._strong)

    def __contains__(self, etag):
        return self.contains(etag)

    def __repr__(self):
        return f"<{type(self).__name__} {str(self)!r}>"
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/etag.pyi b/venv/lib/python3.8/site-packages/werkzeug/datastructures/etag.pyi
new file mode 100644
index 0000000..88e54f1
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/etag.pyi
@@ -0,0 +1,30 @@
+from collections.abc import Collection
+from collections.abc import Iterable
+from collections.abc import Iterator
+
# Stub for the etag collection; iteration and ``len`` only cover the
# strong tags, while ``contains_weak``/``contains_raw`` also consult
# the weak set.
class ETags(Collection[str]):
    _strong: frozenset[str]
    _weak: frozenset[str]
    star_tag: bool
    def __init__(
        self,
        strong_etags: Iterable[str] | None = None,
        weak_etags: Iterable[str] | None = None,
        star_tag: bool = False,
    ) -> None: ...
    def as_set(self, include_weak: bool = False) -> set[str]: ...
    def is_weak(self, etag: str) -> bool: ...
    def is_strong(self, etag: str) -> bool: ...
    def contains_weak(self, etag: str) -> bool: ...
    def contains(self, etag: str) -> bool: ...
    def contains_raw(self, etag: str) -> bool: ...
    def to_header(self) -> str: ...
    # Exactly one of ``etag`` and ``data`` must be provided.
    def __call__(
        self,
        etag: str | None = None,
        data: bytes | None = None,
        include_weak: bool = False,
    ) -> bool: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[str]: ...
    def __contains__(self, item: str) -> bool: ...  # type: ignore
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/file_storage.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/file_storage.py
new file mode 100644
index 0000000..e878a56
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/file_storage.py
@@ -0,0 +1,196 @@
+from __future__ import annotations
+
+import mimetypes
+from io import BytesIO
+from os import fsdecode
+from os import fspath
+
+from .._internal import _plain_int
+from .structures import MultiDict
+
+
class FileStorage:
    """The :class:`FileStorage` class is a thin wrapper over incoming files.
    It is used by the request object to represent uploaded files.  All the
    attributes of the wrapper stream are proxied by the file storage so
    it's possible to do ``storage.read()`` instead of the long form
    ``storage.stream.read()``.
    """

    def __init__(
        self,
        stream=None,
        filename=None,
        name=None,
        content_type=None,
        content_length=None,
        headers=None,
    ):
        """Wrap an uploaded file.

        :param stream: file-like object holding the data; a falsy value
            is replaced with an empty :class:`~io.BytesIO`.
        :param filename: the filename; if omitted it is taken from the
            stream's ``name`` attribute when available.
        :param name: the name of the form field this file was posted as.
        :param content_type: if given, stored as the ``Content-Type``
            header.
        :param content_length: if given, stored as the
            ``Content-Length`` header.
        :param headers: optional headers object; created empty when not
            provided.
        """
        self.name = name
        self.stream = stream or BytesIO()

        # If no filename is provided, attempt to get the filename from
        # the stream object. Python names special streams like
        # ``<stderr>`` with angular brackets, skip these streams.
        if filename is None:
            filename = getattr(stream, "name", None)

            if filename is not None:
                filename = fsdecode(filename)

            if filename and filename[0] == "<" and filename[-1] == ">":
                filename = None
        else:
            # Explicit filenames may be path-like objects or bytes;
            # normalize to str.
            filename = fsdecode(filename)

        self.filename = filename

        if headers is None:
            from .headers import Headers

            headers = Headers()
        self.headers = headers
        # Explicit arguments override whatever the headers object holds.
        if content_type is not None:
            headers["Content-Type"] = content_type
        if content_length is not None:
            headers["Content-Length"] = str(content_length)

    def _parse_content_type(self):
        # Parse lazily and cache; ``http`` is imported at the bottom of
        # this module to break a circular import.
        if not hasattr(self, "_parsed_content_type"):
            self._parsed_content_type = http.parse_options_header(self.content_type)

    @property
    def content_type(self):
        """The content-type sent in the header. Usually not available"""
        return self.headers.get("content-type")

    @property
    def content_length(self):
        """The content-length sent in the header. Usually not available"""
        # Falls back to 0 when the header is missing or not an integer.
        if "content-length" in self.headers:
            try:
                return _plain_int(self.headers["content-length"])
            except ValueError:
                pass

        return 0

    @property
    def mimetype(self):
        """Like :attr:`content_type`, but without parameters (eg, without
        charset, type etc.) and always lowercase. For example if the content
        type is ``text/HTML; charset=utf-8`` the mimetype would be
        ``'text/html'``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[0].lower()

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    def save(self, dst, buffer_size=16384):
        """Save the file to a destination path or file object. If the
        destination is a file object you have to close it yourself after the
        call. The buffer size is the number of bytes held in memory during
        the copy process. It defaults to 16KB.

        For secure file saving also have a look at :func:`secure_filename`.

        :param dst: a filename, :class:`os.PathLike`, or open file
            object to write to.
        :param buffer_size: Passed as the ``length`` parameter of
            :func:`shutil.copyfileobj`.

        .. versionchanged:: 1.0
            Supports :mod:`pathlib`.
        """
        from shutil import copyfileobj

        # Only close the destination if this method opened it.
        close_dst = False

        if hasattr(dst, "__fspath__"):
            dst = fspath(dst)

        if isinstance(dst, str):
            dst = open(dst, "wb")
            close_dst = True

        try:
            copyfileobj(self.stream, dst, buffer_size)
        finally:
            if close_dst:
                dst.close()

    def close(self):
        """Close the underlying file if possible."""
        # Best-effort: swallow any error from the stream's close().
        try:
            self.stream.close()
        except Exception:
            pass

    def __bool__(self):
        # Truthy only when a filename was determined.
        return bool(self.filename)

    def __getattr__(self, name):
        # Proxy unknown attributes to the wrapped stream.
        try:
            return getattr(self.stream, name)
        except AttributeError:
            # SpooledTemporaryFile doesn't implement IOBase, get the
            # attribute from its backing file instead.
            # https://github.com/python/cpython/pull/3249
            if hasattr(self.stream, "_file"):
                return getattr(self.stream._file, name)
            raise

    def __iter__(self):
        return iter(self.stream)

    def __repr__(self):
        return f"<{type(self).__name__}: {self.filename!r} ({self.content_type!r})>"
+
+
class FileMultiDict(MultiDict):
    """A special :class:`MultiDict` that has convenience methods to add
    files to it. This is used for :class:`EnvironBuilder` and generally
    useful for unittesting.

    .. versionadded:: 0.5
    """

    def add_file(self, name, file, filename=None, content_type=None):
        """Add a new file to the dict. ``file`` may be a file name, a
        :class:`file`-like object, or an existing :class:`FileStorage`.

        :param name: the name of the field.
        :param file: a filename or :class:`file`-like object
        :param filename: an optional filename
        :param content_type: an optional content type
        """
        # A ready-made FileStorage is stored as-is.
        if isinstance(file, FileStorage):
            self.add(name, file)
            return

        if isinstance(file, str):
            if filename is None:
                filename = file

            file = open(file, "rb")

        # Guess the content type from the filename when not given.
        if content_type is None and filename:
            content_type = (
                mimetypes.guess_type(filename)[0] or "application/octet-stream"
            )

        self.add(name, FileStorage(file, filename, name, content_type))
+
+
+# circular dependencies
+from .. import http
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/file_storage.pyi b/venv/lib/python3.8/site-packages/werkzeug/datastructures/file_storage.pyi
new file mode 100644
index 0000000..36a7ed9
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/file_storage.pyi
@@ -0,0 +1,49 @@
+from collections.abc import Iterator
+from os import PathLike
+from typing import Any
+from typing import IO
+
+from .headers import Headers
+from .structures import MultiDict
+
+class FileStorage:
+ name: str | None
+ stream: IO[bytes]
+ filename: str | None
+ headers: Headers
+ _parsed_content_type: tuple[str, dict[str, str]]
+ def __init__(
+ self,
+ stream: IO[bytes] | None = None,
+ filename: str | PathLike[str] | None = None,
+ name: str | None = None,
+ content_type: str | None = None,
+ content_length: int | None = None,
+ headers: Headers | None = None,
+ ) -> None: ...
+ def _parse_content_type(self) -> None: ...
+ @property
+ def content_type(self) -> str: ...
+ @property
+ def content_length(self) -> int: ...
+ @property
+ def mimetype(self) -> str: ...
+ @property
+ def mimetype_params(self) -> dict[str, str]: ...
+ def save(
+ self, dst: str | PathLike[str] | IO[bytes], buffer_size: int = ...
+ ) -> None: ...
+ def close(self) -> None: ...
+ def __bool__(self) -> bool: ...
+ def __getattr__(self, name: str) -> Any: ...
+ def __iter__(self) -> Iterator[bytes]: ...
+ def __repr__(self) -> str: ...
+
+class FileMultiDict(MultiDict[str, FileStorage]):
+ def add_file(
+ self,
+ name: str,
+ file: FileStorage | str | IO[bytes],
+ filename: str | None = None,
+ content_type: str | None = None,
+ ) -> None: ...
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/headers.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/headers.py
new file mode 100644
index 0000000..d9dd655
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/headers.py
@@ -0,0 +1,515 @@
+from __future__ import annotations
+
+import re
+import typing as t
+
+from .._internal import _missing
+from ..exceptions import BadRequestKeyError
+from .mixins import ImmutableHeadersMixin
+from .structures import iter_multi_items
+from .structures import MultiDict
+
+
class Headers:
    """An object that stores some headers. It has a dict-like interface,
    but is ordered, can store the same key multiple times, and iterating
    yields ``(key, value)`` pairs instead of only keys.

    This data structure is useful if you want a nicer way to handle WSGI
    headers which are stored as tuples in a list.

    From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
    also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
    and will render a page for a ``400 BAD REQUEST`` if caught in a
    catch-all for HTTP exceptions.

    Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
    class, with the exception of `__getitem__`. :mod:`wsgiref` will return
    `None` for ``headers['missing']``, whereas :class:`Headers` will raise
    a :class:`KeyError`.

    To create a new ``Headers`` object, pass it a list, dict, or
    other ``Headers`` object with default values. These values are
    validated the same way values added later are.

    :param defaults: The list of default values for the :class:`Headers`.

    .. versionchanged:: 2.1.0
        Default values are validated the same as values added later.

    .. versionchanged:: 0.9
        This data structure now stores unicode values similar to how the
        multi dicts do it. The main difference is that bytes can be set as
        well which will automatically be latin1 decoded.

    .. versionchanged:: 0.9
        The :meth:`linked` function was removed without replacement as it
        was an API that does not support the changes to the encoding model.
    """

    def __init__(self, defaults=None):
        # Ordered list of (key, value) tuples; duplicate keys are kept.
        self._list = []
        if defaults is not None:
            self.extend(defaults)

    def __getitem__(self, key, _get_mode=False):
        # ``_get_mode`` makes missing keys raise a plain KeyError instead
        # of BadRequestKeyError and disables index/slice access; it is
        # used internally by get() and __contains__().
        if not _get_mode:
            if isinstance(key, int):
                return self._list[key]
            elif isinstance(key, slice):
                return self.__class__(self._list[key])
        if not isinstance(key, str):
            raise BadRequestKeyError(key)
        ikey = key.lower()
        for k, v in self._list:
            if k.lower() == ikey:
                return v
        # micro optimization: if we are in get mode we will catch that
        # exception one stack level down so we can raise a standard
        # key error instead of our special one.
        if _get_mode:
            raise KeyError()
        raise BadRequestKeyError(key)

    def __eq__(self, other):
        # Comparison is case-insensitive in the key, order-insensitive,
        # and collapses duplicate entries (set semantics).
        def lowered(item):
            return (item[0].lower(),) + item[1:]

        return other.__class__ is self.__class__ and set(
            map(lowered, other._list)
        ) == set(map(lowered, self._list))

    # Mutable, so explicitly unhashable.
    __hash__ = None

    def get(self, key, default=None, type=None):
        """Return the default value if the requested data doesn't exist.
        If `type` is provided and is a callable it should convert the value,
        return it or raise a :exc:`ValueError` if that is not possible. In
        this case the function will return the default as if the value was not
        found:

        >>> d = Headers([('Content-Length', '42')])
        >>> d.get('Content-Length', type=int)
        42

        :param key: The key to be looked up.
        :param default: The default value to be returned if the key can't
                        be looked up. If not further specified `None` is
                        returned.
        :param type: A callable that is used to cast the value in the
                     :class:`Headers`. If a :exc:`ValueError` is raised
                     by this callable the default value is returned.

        .. versionchanged:: 3.0
            The ``as_bytes`` parameter was removed.

        .. versionchanged:: 0.9
            The ``as_bytes`` parameter was added.
        """
        try:
            rv = self.__getitem__(key, _get_mode=True)
        except KeyError:
            return default
        if type is None:
            return rv
        try:
            return type(rv)
        except ValueError:
            return default

    def getlist(self, key, type=None):
        """Return the list of items for a given key. If that key is not in the
        :class:`Headers`, the return value will be an empty list. Just like
        :meth:`get`, :meth:`getlist` accepts a `type` parameter. All items will
        be converted with the callable defined there.

        :param key: The key to be looked up.
        :param type: A callable that is used to cast the value in the
                     :class:`Headers`. If a :exc:`ValueError` is raised
                     by this callable the value will be removed from the list.
        :return: a :class:`list` of all the values for the key.

        .. versionchanged:: 3.0
            The ``as_bytes`` parameter was removed.

        .. versionchanged:: 0.9
            The ``as_bytes`` parameter was added.
        """
        ikey = key.lower()
        result = []
        for k, v in self:
            if k.lower() == ikey:
                if type is not None:
                    try:
                        v = type(v)
                    except ValueError:
                        # Values the converter rejects are dropped, not
                        # replaced by a default.
                        continue
                result.append(v)
        return result

    def get_all(self, name):
        """Return a list of all the values for the named field.

        This method is compatible with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.get_all` method.
        """
        return self.getlist(name)

    def items(self, lower=False):
        # Yields (key, value) pairs; ``lower=True`` lowercases the keys.
        for key, value in self:
            if lower:
                key = key.lower()
            yield key, value

    def keys(self, lower=False):
        for key, _ in self.items(lower):
            yield key

    def values(self):
        for _, value in self.items():
            yield value

    def extend(self, *args, **kwargs):
        """Extend headers in this object with items from another object
        containing header items as well as keyword arguments.

        To replace existing keys instead of extending, use
        :meth:`update` instead.

        If provided, the first argument can be another :class:`Headers`
        object, a :class:`MultiDict`, :class:`dict`, or iterable of
        pairs.

        .. versionchanged:: 1.0
            Support :class:`MultiDict`. Allow passing ``kwargs``.
        """
        if len(args) > 1:
            raise TypeError(f"update expected at most 1 arguments, got {len(args)}")

        if args:
            for key, value in iter_multi_items(args[0]):
                self.add(key, value)

        for key, value in iter_multi_items(kwargs):
            self.add(key, value)

    def __delitem__(self, key, _index_operation=True):
        # Integer/slice deletion targets positions in the internal list;
        # string deletion removes every entry with that (lowercased) key.
        if _index_operation and isinstance(key, (int, slice)):
            del self._list[key]
            return
        key = key.lower()
        new = []
        for k, v in self._list:
            if k.lower() != key:
                new.append((k, v))
        self._list[:] = new

    def remove(self, key):
        """Remove a key.

        :param key: The key to be removed.
        """
        return self.__delitem__(key, _index_operation=False)

    def pop(self, key=None, default=_missing):
        """Removes and returns a key or index.

        :param key: The key to be popped. If this is an integer the item at
                    that position is removed, if it's a string the value for
                    that key is. If the key is omitted or `None` the last
                    item is removed.
        :return: an item.
        """
        # NOTE: ``default`` only applies to string keys; for ``None`` or
        # integer keys the underlying list.pop() semantics are used.
        if key is None:
            return self._list.pop()
        if isinstance(key, int):
            return self._list.pop(key)
        try:
            rv = self[key]
            self.remove(key)
        except KeyError:
            if default is not _missing:
                return default
            raise
        return rv

    def popitem(self):
        """Removes a key or index and returns a (key, value) item."""
        return self.pop()

    def __contains__(self, key):
        """Check if a key is present."""
        try:
            self.__getitem__(key, _get_mode=True)
        except KeyError:
            return False
        return True

    def __iter__(self):
        """Yield ``(key, value)`` tuples."""
        return iter(self._list)

    def __len__(self):
        return len(self._list)

    def add(self, _key, _value, **kw):
        """Add a new header tuple to the list.

        Keyword arguments can specify additional parameters for the header
        value, with underscores converted to dashes::

        >>> d = Headers()
        >>> d.add('Content-Type', 'text/plain')
        >>> d.add('Content-Disposition', 'attachment', filename='foo.png')

        The keyword argument dumping uses :func:`dump_options_header`
        behind the scenes.

        .. versionadded:: 0.4.1
            keyword arguments were added for :mod:`wsgiref` compatibility.
        """
        if kw:
            _value = _options_header_vkw(_value, kw)
        # Coerce to str and reject embedded newlines (header injection).
        _value = _str_header_value(_value)
        self._list.append((_key, _value))

    def add_header(self, _key, _value, **_kw):
        """Add a new header tuple to the list.

        An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.add_header` method.
        """
        self.add(_key, _value, **_kw)

    def clear(self):
        """Clears all headers."""
        del self._list[:]

    def set(self, _key, _value, **kw):
        """Remove all header tuples for `key` and add a new one. The newly
        added key either appears at the end of the list if there was no
        entry or replaces the first one.

        Keyword arguments can specify additional parameters for the header
        value, with underscores converted to dashes. See :meth:`add` for
        more information.

        .. versionchanged:: 0.6.1
            :meth:`set` now accepts the same arguments as :meth:`add`.

        :param key: The key to be inserted.
        :param value: The value to be inserted.
        """
        if kw:
            _value = _options_header_vkw(_value, kw)
        _value = _str_header_value(_value)
        if not self._list:
            self._list.append((_key, _value))
            return
        # Walk the list once: replace the first matching entry, then
        # filter any later duplicates out of the *remaining* iterator.
        listiter = iter(self._list)
        ikey = _key.lower()
        for idx, (old_key, _old_value) in enumerate(listiter):
            if old_key.lower() == ikey:
                # replace first occurrence
                self._list[idx] = (_key, _value)
                break
        else:
            # No match found: the for/else appends and returns.
            self._list.append((_key, _value))
            return
        self._list[idx + 1 :] = [t for t in listiter if t[0].lower() != ikey]

    def setlist(self, key, values):
        """Remove any existing values for a header and add new ones.

        :param key: The header key to set.
        :param values: An iterable of values to set for the key.

        .. versionadded:: 1.0
        """
        if values:
            # First value replaces existing entries, the rest are appended.
            values_iter = iter(values)
            self.set(key, next(values_iter))

            for value in values_iter:
                self.add(key, value)
        else:
            self.remove(key)

    def setdefault(self, key, default):
        """Return the first value for the key if it is in the headers,
        otherwise set the header to the value given by ``default`` and
        return that.

        :param key: The header key to get.
        :param default: The value to set for the key if it is not in the
            headers.
        """
        if key in self:
            return self[key]

        self.set(key, default)
        return default

    def setlistdefault(self, key, default):
        """Return the list of values for the key if it is in the
        headers, otherwise set the header to the list of values given
        by ``default`` and return that.

        Unlike :meth:`MultiDict.setlistdefault`, modifying the returned
        list will not affect the headers.

        :param key: The header key to get.
        :param default: An iterable of values to set for the key if it
            is not in the headers.

        .. versionadded:: 1.0
        """
        if key not in self:
            self.setlist(key, default)

        return self.getlist(key)

    def __setitem__(self, key, value):
        """Like :meth:`set` but also supports index/slice based setting."""
        if isinstance(key, (slice, int)):
            if isinstance(key, int):
                value = [value]
            # Index/slice assignment expects (key, value) tuples and
            # validates each value the same way set()/add() do.
            value = [(k, _str_header_value(v)) for (k, v) in value]
            if isinstance(key, int):
                self._list[key] = value[0]
            else:
                self._list[key] = value
        else:
            self.set(key, value)

    def update(self, *args, **kwargs):
        """Replace headers in this object with items from another
        headers object and keyword arguments.

        To extend existing keys instead of replacing, use :meth:`extend`
        instead.

        If provided, the first argument can be another :class:`Headers`
        object, a :class:`MultiDict`, :class:`dict`, or iterable of
        pairs.

        .. versionadded:: 1.0
        """
        if len(args) > 1:
            raise TypeError(f"update expected at most 1 arguments, got {len(args)}")

        if args:
            mapping = args[0]

            # Multi-value containers replace the whole value list per
            # key; plain mappings and pair iterables replace one value.
            if isinstance(mapping, (Headers, MultiDict)):
                for key in mapping.keys():
                    self.setlist(key, mapping.getlist(key))
            elif isinstance(mapping, dict):
                for key, value in mapping.items():
                    if isinstance(value, (list, tuple)):
                        self.setlist(key, value)
                    else:
                        self.set(key, value)
            else:
                for key, value in mapping:
                    self.set(key, value)

        for key, value in kwargs.items():
            if isinstance(value, (list, tuple)):
                self.setlist(key, value)
            else:
                self.set(key, value)

    def to_wsgi_list(self):
        """Convert the headers into a list suitable for WSGI.

        :return: list
        """
        return list(self)

    def copy(self):
        return self.__class__(self._list)

    def __copy__(self):
        return self.copy()

    def __str__(self):
        """Returns formatted headers suitable for HTTP transmission."""
        strs = []
        for key, value in self.to_wsgi_list():
            strs.append(f"{key}: {value}")
        # The trailing empty element produces the final CRLF CRLF
        # terminator after join().
        strs.append("\r\n")
        return "\r\n".join(strs)

    def __repr__(self):
        return f"{type(self).__name__}({list(self)!r})"
+
+
def _options_header_vkw(value: str, kw: dict[str, t.Any]):
    # Convert ``foo_bar`` keyword names to ``foo-bar`` header parameter
    # names and serialize them onto the base header value.
    params = {name.replace("_", "-"): val for name, val in kw.items()}
    return http.dump_options_header(value, params)
+
+
_newline_re = re.compile(r"[\r\n]")


def _str_header_value(value: t.Any) -> str:
    """Coerce a header value to ``str`` and reject values containing
    newlines, which would allow header injection.
    """
    text = value if isinstance(value, str) else str(value)

    if _newline_re.search(text) is not None:
        raise ValueError("Header values must not contain newline characters.")

    return text
+
+
class EnvironHeaders(ImmutableHeadersMixin, Headers):
    """Read only version of the headers from a WSGI environment. This
    provides the same interface as `Headers` and is constructed from
    a WSGI environment.
    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
    HTTP exceptions.
    """

    def __init__(self, environ):
        # Deliberately does not call Headers.__init__; entries are read
        # straight from the environ on access instead of being copied.
        self.environ = environ

    def __eq__(self, other):
        return self.environ is other.environ

    __hash__ = None

    def __getitem__(self, key, _get_mode=False):
        # ``_get_mode`` is accepted but ignored: there is no positional
        # indexing here, it only exists because get() passes it.
        if not isinstance(key, str):
            raise KeyError(key)

        name = key.upper().replace("-", "_")

        if name in ("CONTENT_TYPE", "CONTENT_LENGTH"):
            return self.environ[name]

        return self.environ[f"HTTP_{name}"]

    def __len__(self):
        # Count via our own iterator; list(self) would call back into
        # __len__ and recurse.
        return sum(1 for _ in self)

    def __iter__(self):
        for name, value in self.environ.items():
            if name in ("CONTENT_TYPE", "CONTENT_LENGTH"):
                # These two have no HTTP_ prefix and are skipped when empty.
                if value:
                    yield name.replace("_", "-").title(), value
            elif name.startswith("HTTP_") and name not in (
                "HTTP_CONTENT_TYPE",
                "HTTP_CONTENT_LENGTH",
            ):
                yield name[5:].replace("_", "-").title(), value

    def copy(self):
        raise TypeError(f"cannot create {type(self).__name__!r} copies")
+
+
+# circular dependencies
+from .. import http
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/headers.pyi b/venv/lib/python3.8/site-packages/werkzeug/datastructures/headers.pyi
new file mode 100644
index 0000000..8650222
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/headers.pyi
@@ -0,0 +1,109 @@
# Typing stubs for werkzeug.datastructures.headers.
# The ``# type: ignore`` comments mark deliberate signature divergences
# from dict, which Headers subclasses only nominally.
from collections.abc import Callable
from collections.abc import Iterable
from collections.abc import Iterator
from collections.abc import Mapping
from typing import Literal
from typing import NoReturn
from typing import overload
from typing import TypeVar

from _typeshed import SupportsKeysAndGetItem
from _typeshed.wsgi import WSGIEnvironment

from .mixins import ImmutableHeadersMixin

D = TypeVar("D")
T = TypeVar("T")

class Headers(dict[str, str]):
    # Internal ordered storage of (key, value) pairs.
    _list: list[tuple[str, str]]
    def __init__(
        self,
        defaults: Mapping[str, str | Iterable[str]]
        | Iterable[tuple[str, str]]
        | None = None,
    ) -> None: ...
    # Lookup by header name, positional index, or slice.
    @overload
    def __getitem__(self, key: str) -> str: ...
    @overload
    def __getitem__(self, key: int) -> tuple[str, str]: ...
    @overload
    def __getitem__(self, key: slice) -> Headers: ...
    @overload
    def __getitem__(self, key: str, _get_mode: Literal[True] = ...) -> str: ...
    def __eq__(self, other: object) -> bool: ...
    @overload  # type: ignore
    def get(self, key: str, default: str) -> str: ...
    @overload
    def get(self, key: str, default: str | None = None) -> str | None: ...
    @overload
    def get(
        self, key: str, default: T | None = None, type: Callable[[str], T] = ...
    ) -> T | None: ...
    @overload
    def getlist(self, key: str) -> list[str]: ...
    @overload
    def getlist(self, key: str, type: Callable[[str], T]) -> list[T]: ...
    def get_all(self, name: str) -> list[str]: ...
    def items(  # type: ignore
        self, lower: bool = False
    ) -> Iterator[tuple[str, str]]: ...
    def keys(self, lower: bool = False) -> Iterator[str]: ...  # type: ignore
    def values(self) -> Iterator[str]: ...  # type: ignore
    def extend(
        self,
        *args: Mapping[str, str | Iterable[str]] | Iterable[tuple[str, str]],
        **kwargs: str | Iterable[str],
    ) -> None: ...
    @overload
    def __delitem__(self, key: str | int | slice) -> None: ...
    @overload
    def __delitem__(self, key: str, _index_operation: Literal[False]) -> None: ...
    def remove(self, key: str) -> None: ...
    @overload  # type: ignore
    def pop(self, key: str, default: str | None = None) -> str: ...
    @overload
    def pop(
        self, key: int | None = None, default: tuple[str, str] | None = None
    ) -> tuple[str, str]: ...
    def popitem(self) -> tuple[str, str]: ...
    def __contains__(self, key: str) -> bool: ...  # type: ignore
    def has_key(self, key: str) -> bool: ...
    def __iter__(self) -> Iterator[tuple[str, str]]: ...  # type: ignore
    def add(self, _key: str, _value: str, **kw: str) -> None: ...
    def _validate_value(self, value: str) -> None: ...
    def add_header(self, _key: str, _value: str, **_kw: str) -> None: ...
    def clear(self) -> None: ...
    def set(self, _key: str, _value: str, **kw: str) -> None: ...
    def setlist(self, key: str, values: Iterable[str]) -> None: ...
    def setdefault(self, key: str, default: str) -> str: ...
    def setlistdefault(self, key: str, default: Iterable[str]) -> None: ...
    @overload
    def __setitem__(self, key: str, value: str) -> None: ...
    @overload
    def __setitem__(self, key: int, value: tuple[str, str]) -> None: ...
    @overload
    def __setitem__(self, key: slice, value: Iterable[tuple[str, str]]) -> None: ...
    @overload
    def update(
        self, __m: SupportsKeysAndGetItem[str, str], **kwargs: str | Iterable[str]
    ) -> None: ...
    @overload
    def update(
        self, __m: Iterable[tuple[str, str]], **kwargs: str | Iterable[str]
    ) -> None: ...
    @overload
    def update(self, **kwargs: str | Iterable[str]) -> None: ...
    def to_wsgi_list(self) -> list[tuple[str, str]]: ...
    def copy(self) -> Headers: ...
    def __copy__(self) -> Headers: ...

class EnvironHeaders(ImmutableHeadersMixin, Headers):
    environ: WSGIEnvironment
    def __init__(self, environ: WSGIEnvironment) -> None: ...
    def __eq__(self, other: object) -> bool: ...
    def __getitem__(  # type: ignore
        self, key: str, _get_mode: Literal[False] = False
    ) -> str: ...
    def __iter__(self) -> Iterator[tuple[str, str]]: ...  # type: ignore
    # copy() always raises TypeError on this read-only view.
    def copy(self) -> NoReturn: ...
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/mixins.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/mixins.py
new file mode 100644
index 0000000..2c84ca8
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/mixins.py
@@ -0,0 +1,242 @@
+from __future__ import annotations
+
+from itertools import repeat
+
+from .._internal import _missing
+
+
def is_immutable(self):
    """Abort a mutating call on an immutable object with ``TypeError``."""
    cls_name = type(self).__name__
    raise TypeError(f"{cls_name!r} objects are immutable")
+
+
class ImmutableListMixin:
    """Makes a :class:`list` immutable.

    .. versionadded:: 0.5

    :private:
    """

    # Lazily computed hash; a plain list is unhashable, so this mixin
    # hashes the tuple of items and caches the result.
    _hash_cache = None

    def __hash__(self):
        if self._hash_cache is not None:
            return self._hash_cache
        rv = self._hash_cache = hash(tuple(self))
        return rv

    def __reduce_ex__(self, protocol):
        # Pickle as (class, (plain list,)); unpickling re-runs __init__.
        return type(self), (list(self),)

    # Every mutating list method below is disabled via is_immutable(),
    # which raises TypeError.
    def __delitem__(self, key):
        is_immutable(self)

    def __iadd__(self, other):
        is_immutable(self)

    def __imul__(self, other):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def append(self, item):
        is_immutable(self)

    def remove(self, item):
        is_immutable(self)

    def extend(self, iterable):
        is_immutable(self)

    def insert(self, pos, value):
        is_immutable(self)

    def pop(self, index=-1):
        is_immutable(self)

    def reverse(self):
        is_immutable(self)

    def sort(self, key=None, reverse=False):
        is_immutable(self)
+
+
class ImmutableDictMixin:
    """Makes a :class:`dict` immutable.

    .. versionadded:: 0.5

    :private:
    """

    # Lazily computed hash; a plain dict is unhashable, so this mixin
    # hashes a frozenset of the items and caches the result.
    _hash_cache = None

    @classmethod
    def fromkeys(cls, keys, value=None):
        # Build the instance explicitly so the (key, value) pairs are
        # passed through __init__ as one iterable rather than set via
        # (disabled) mutation methods.
        instance = super().__new__(cls)
        instance.__init__(zip(keys, repeat(value)))
        return instance

    def __reduce_ex__(self, protocol):
        return type(self), (dict(self),)

    def _iter_hashitems(self):
        # Hook for subclasses (see ImmutableMultiDictMixin) to feed a
        # different item view into __hash__.
        return self.items()

    def __hash__(self):
        if self._hash_cache is not None:
            return self._hash_cache
        rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
        return rv

    # Every mutating dict method below is disabled via is_immutable(),
    # which raises TypeError.
    def setdefault(self, key, default=None):
        is_immutable(self)

    def update(self, *args, **kwargs):
        is_immutable(self)

    def pop(self, key, default=None):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def __delitem__(self, key):
        is_immutable(self)

    def clear(self):
        is_immutable(self)
+
+
class ImmutableMultiDictMixin(ImmutableDictMixin):
    """Makes a :class:`MultiDict` immutable.

    .. versionadded:: 0.5

    :private:
    """

    def __reduce_ex__(self, protocol):
        # Pickle every value for every key, not only the first one.
        return type(self), (list(self.items(multi=True)),)

    def _iter_hashitems(self):
        # Hash over all (key, value) pairs so dicts that differ only in
        # their extra values hash differently.
        return self.items(multi=True)

    # The MultiDict-specific mutators are disabled as well.
    def add(self, key, value):
        is_immutable(self)

    def popitemlist(self):
        is_immutable(self)

    def poplist(self, key):
        is_immutable(self)

    def setlist(self, key, new_list):
        is_immutable(self)

    def setlistdefault(self, key, default_list=None):
        is_immutable(self)
+
+
class ImmutableHeadersMixin:
    """Makes a :class:`Headers` immutable. We do not mark them as
    hashable though since the only usecase for this datastructure
    in Werkzeug is a view on a mutable structure.

    .. versionadded:: 0.5

    :private:
    """

    # Every mutating Headers method is disabled via is_immutable(),
    # which raises TypeError. Signatures mirror Headers so the methods
    # shadow cleanly.
    def __delitem__(self, key, **kwargs):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def set(self, _key, _value, **kwargs):
        is_immutable(self)

    def setlist(self, key, values):
        is_immutable(self)

    def add(self, _key, _value, **kwargs):
        is_immutable(self)

    def add_header(self, _key, _value, **_kwargs):
        is_immutable(self)

    def remove(self, key):
        is_immutable(self)

    def extend(self, *args, **kwargs):
        is_immutable(self)

    def update(self, *args, **kwargs):
        is_immutable(self)

    def insert(self, pos, value):
        is_immutable(self)

    # _missing mirrors the sentinel default of Headers.pop; the method
    # raises before the default is ever inspected.
    def pop(self, key=None, default=_missing):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def setdefault(self, key, default):
        is_immutable(self)

    def setlistdefault(self, key, default):
        is_immutable(self)
+
+
+def _calls_update(name):
+ def oncall(self, *args, **kw):
+ rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
+
+ if self.on_update is not None:
+ self.on_update(self)
+
+ return rv
+
+ oncall.__name__ = name
+ return oncall
+
+
class UpdateDictMixin(dict):
    """Makes dicts call `self.on_update` on modifications.

    .. versionadded:: 0.5

    :private:
    """

    # Callback invoked with the instance after each mutation; None
    # disables notification.
    on_update = None

    def setdefault(self, key, default=None):
        had_key = key in self
        value = super().setdefault(key, default)
        # Only a newly inserted key counts as a modification.
        if not had_key and self.on_update is not None:
            self.on_update(self)
        return value

    def pop(self, key, default=_missing):
        had_key = key in self
        if default is _missing:
            value = super().pop(key)
        else:
            value = super().pop(key, default)
        # Only an actual removal counts as a modification.
        if had_key and self.on_update is not None:
            self.on_update(self)
        return value

    # The remaining mutators notify unconditionally after delegation.
    __setitem__ = _calls_update("__setitem__")
    __delitem__ = _calls_update("__delitem__")
    clear = _calls_update("clear")
    popitem = _calls_update("popitem")
    update = _calls_update("update")
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/mixins.pyi b/venv/lib/python3.8/site-packages/werkzeug/datastructures/mixins.pyi
new file mode 100644
index 0000000..40453f7
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/mixins.pyi
@@ -0,0 +1,97 @@
# Typing stubs for werkzeug.datastructures.mixins.
# ``# type: ignore`` marks deliberate divergences from the list/dict
# signatures the mixins override with always-raising methods.
from collections.abc import Callable
from collections.abc import Hashable
from collections.abc import Iterable
from typing import Any
from typing import NoReturn
from typing import overload
from typing import SupportsIndex
from typing import TypeVar

from _typeshed import SupportsKeysAndGetItem

from .headers import Headers

K = TypeVar("K")
T = TypeVar("T")
V = TypeVar("V")

def is_immutable(self: object) -> NoReturn: ...

class ImmutableListMixin(list[V]):
    _hash_cache: int | None
    def __hash__(self) -> int: ...  # type: ignore
    def __delitem__(self, key: SupportsIndex | slice) -> NoReturn: ...
    def __iadd__(self, other: Any) -> NoReturn: ...  # type: ignore
    def __imul__(self, other: SupportsIndex) -> NoReturn: ...
    def __setitem__(self, key: int | slice, value: V) -> NoReturn: ...  # type: ignore
    def append(self, value: V) -> NoReturn: ...
    def remove(self, value: V) -> NoReturn: ...
    def extend(self, values: Iterable[V]) -> NoReturn: ...
    def insert(self, pos: SupportsIndex, value: V) -> NoReturn: ...
    def pop(self, index: SupportsIndex = -1) -> NoReturn: ...
    def reverse(self) -> NoReturn: ...
    def sort(
        self, key: Callable[[V], Any] | None = None, reverse: bool = False
    ) -> NoReturn: ...

class ImmutableDictMixin(dict[K, V]):
    _hash_cache: int | None
    @classmethod
    def fromkeys(  # type: ignore
        cls, keys: Iterable[K], value: V | None = None
    ) -> ImmutableDictMixin[K, V]: ...
    def _iter_hashitems(self) -> Iterable[Hashable]: ...
    def __hash__(self) -> int: ...  # type: ignore
    def setdefault(self, key: K, default: V | None = None) -> NoReturn: ...
    def update(self, *args: Any, **kwargs: V) -> NoReturn: ...
    def pop(self, key: K, default: V | None = None) -> NoReturn: ...  # type: ignore
    def popitem(self) -> NoReturn: ...
    def __setitem__(self, key: K, value: V) -> NoReturn: ...
    def __delitem__(self, key: K) -> NoReturn: ...
    def clear(self) -> NoReturn: ...

class ImmutableMultiDictMixin(ImmutableDictMixin[K, V]):
    def _iter_hashitems(self) -> Iterable[Hashable]: ...
    def add(self, key: K, value: V) -> NoReturn: ...
    def popitemlist(self) -> NoReturn: ...
    def poplist(self, key: K) -> NoReturn: ...
    def setlist(self, key: K, new_list: Iterable[V]) -> NoReturn: ...
    def setlistdefault(
        self, key: K, default_list: Iterable[V] | None = None
    ) -> NoReturn: ...

class ImmutableHeadersMixin(Headers):
    def __delitem__(self, key: Any, _index_operation: bool = True) -> NoReturn: ...
    def __setitem__(self, key: Any, value: Any) -> NoReturn: ...
    def set(self, _key: Any, _value: Any, **kw: Any) -> NoReturn: ...
    def setlist(self, key: Any, values: Any) -> NoReturn: ...
    def add(self, _key: Any, _value: Any, **kw: Any) -> NoReturn: ...
    def add_header(self, _key: Any, _value: Any, **_kw: Any) -> NoReturn: ...
    def remove(self, key: Any) -> NoReturn: ...
    def extend(self, *args: Any, **kwargs: Any) -> NoReturn: ...
    def update(self, *args: Any, **kwargs: Any) -> NoReturn: ...
    def insert(self, pos: Any, value: Any) -> NoReturn: ...
    def pop(self, key: Any = None, default: Any = ...) -> NoReturn: ...
    def popitem(self) -> NoReturn: ...
    def setdefault(self, key: Any, default: Any) -> NoReturn: ...
    def setlistdefault(self, key: Any, default: Any) -> NoReturn: ...

def _calls_update(name: str) -> Callable[[UpdateDictMixin[K, V]], Any]: ...

class UpdateDictMixin(dict[K, V]):
    # NOTE(review): this annotation reads as a callable taking
    # (UpdateDictMixin | None, None); the implementation calls
    # on_update(self) — confirm against upstream stubs.
    on_update: Callable[[UpdateDictMixin[K, V] | None, None], None]
    def setdefault(self, key: K, default: V | None = None) -> V: ...
    @overload
    def pop(self, key: K) -> V: ...
    @overload
    def pop(self, key: K, default: V | T = ...) -> V | T: ...
    def __setitem__(self, key: K, value: V) -> None: ...
    def __delitem__(self, key: K) -> None: ...
    def clear(self) -> None: ...
    def popitem(self) -> tuple[K, V]: ...
    @overload
    def update(self, __m: SupportsKeysAndGetItem[K, V], **kwargs: V) -> None: ...
    @overload
    def update(self, __m: Iterable[tuple[K, V]], **kwargs: V) -> None: ...
    @overload
    def update(self, **kwargs: V) -> None: ...
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/range.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/range.py
new file mode 100644
index 0000000..7011ea4
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/range.py
@@ -0,0 +1,180 @@
+from __future__ import annotations
+
+
class IfRange:
    """Very simple object representing the parsed ``If-Range`` header.

    Holds at most one of ``etag`` or ``date``; both may be ``None``,
    never both set.

    .. versionadded:: 0.7
    """

    def __init__(self, etag=None, date=None):
        #: The etag parsed and unquoted. Ranges always operate on strong
        #: etags so the weakness information is not necessary.
        self.etag = etag
        #: The date in parsed format or `None`.
        self.date = date

    def to_header(self):
        """Converts the object back into an HTTP header."""
        # A date takes precedence; fall back to the etag, then empty.
        if self.date is not None:
            return http.http_date(self.date)
        return http.quote_etag(self.etag) if self.etag is not None else ""

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return f"<{type(self).__name__} {str(self)!r}>"
+
+
class Range:
    """Represents a ``Range`` header. All methods support only bytes as
    the unit. Several ranges may be stored, but most methods only work
    when exactly one range is present.

    :raise ValueError: If the ranges provided are invalid.

    .. versionchanged:: 0.15
        The ranges passed in are validated.

    .. versionadded:: 0.7
    """

    def __init__(self, units, ranges):
        #: The units of this range. Usually "bytes".
        self.units = units
        #: A list of ``(begin, end)`` tuples for the range header provided.
        #: The ranges are non-inclusive.
        self.ranges = ranges

        for begin, end in ranges:
            # A missing begin is invalid; with an explicit end, begin
            # must be a non-negative value strictly below it. A negative
            # begin with end=None is a valid suffix range.
            if begin is None or (end is not None and not 0 <= begin < end):
                raise ValueError(f"{(begin, end)} is not a valid range.")

    def range_for_length(self, length):
        """If the range is for bytes, the length is not None and there is
        exactly one range and it is satisfiable it returns a ``(start, stop)``
        tuple, otherwise `None`.
        """
        if self.units != "bytes" or length is None or len(self.ranges) != 1:
            return None

        begin, end = self.ranges[0]

        if end is None:
            end = length

        # Negative begin means "last -begin bytes".
        if begin < 0:
            begin += length

        if http.is_byte_range_valid(begin, end, length):
            return begin, min(end, length)

        return None

    def make_content_range(self, length):
        """Creates a :class:`~werkzeug.datastructures.ContentRange` object
        from the current range and given content length.
        """
        rng = self.range_for_length(length)

        if rng is None:
            return None

        return ContentRange(self.units, rng[0], rng[1], length)

    def to_header(self):
        """Converts the object back into an HTTP header."""
        parts = []

        for begin, end in self.ranges:
            if end is not None:
                # End is stored non-inclusive; the header is inclusive.
                parts.append(f"{begin}-{end - 1}")
            elif begin >= 0:
                parts.append(f"{begin}-")
            else:
                # Suffix range, e.g. "-500".
                parts.append(str(begin))

        return f"{self.units}={','.join(parts)}"

    def to_content_range_header(self, length):
        """Converts the object into `Content-Range` HTTP header,
        based on given length
        """
        rng = self.range_for_length(length)

        if rng is None:
            return None

        return f"{self.units} {rng[0]}-{rng[1] - 1}/{length}"

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return f"<{type(self).__name__} {str(self)!r}>"
+
+
+def _callback_property(name):
+ def fget(self):
+ return getattr(self, name)
+
+ def fset(self, value):
+ setattr(self, name, value)
+ if self.on_update is not None:
+ self.on_update(self)
+
+ return property(fget, fset)
+
+
class ContentRange:
    """Represents the content range header.

    .. versionadded:: 0.7
    """

    def __init__(self, units, start, stop, length=None, on_update=None):
        # NOTE(review): validation via assert disappears under
        # ``python -O``; invalid ranges would then pass silently.
        assert http.is_byte_range_valid(start, stop, length), "Bad range provided"
        self.on_update = on_update
        self.set(start, stop, length, units)

    # Each property below stores to a ``_``-prefixed slot and fires
    # on_update when assigned (see _callback_property).
    #: The units to use, usually "bytes"
    units = _callback_property("_units")
    #: The start point of the range or `None`.
    start = _callback_property("_start")
    #: The stop point of the range (non-inclusive) or `None`. Can only be
    #: `None` if also start is `None`.
    stop = _callback_property("_stop")
    #: The length of the range or `None`.
    length = _callback_property("_length")

    def set(self, start, stop, length=None, units="bytes"):
        """Simple method to update the ranges."""
        assert http.is_byte_range_valid(start, stop, length), "Bad range provided"
        # Write the backing slots directly (bypassing the properties) and
        # notify once at the end.
        self._units = units
        self._start = start
        self._stop = stop
        self._length = length
        if self.on_update is not None:
            self.on_update(self)

    def unset(self):
        """Sets the units to `None` which indicates that the header should
        no longer be used.
        """
        self.set(None, None, units=None)

    def to_header(self):
        # An unset range serializes to the empty string.
        if self.units is None:
            return ""
        # Unknown total length is rendered as "*" per the header grammar.
        if self.length is None:
            length = "*"
        else:
            length = self.length
        if self.start is None:
            return f"{self.units} */{length}"
        return f"{self.units} {self.start}-{self.stop - 1}/{length}"

    def __bool__(self):
        # Truthy only while the header is in use (units set).
        return self.units is not None

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return f"<{type(self).__name__} {str(self)!r}>"
+
+
+# circular dependencies
+from .. import http
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/range.pyi b/venv/lib/python3.8/site-packages/werkzeug/datastructures/range.pyi
new file mode 100644
index 0000000..f38ad69
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/range.pyi
@@ -0,0 +1,57 @@
# Typing stubs for werkzeug.datastructures.range.
from collections.abc import Callable
from datetime import datetime

class IfRange:
    etag: str | None
    date: datetime | None
    def __init__(
        self, etag: str | None = None, date: datetime | None = None
    ) -> None: ...
    def to_header(self) -> str: ...

class Range:
    units: str
    # (begin, end) pairs; end is None for open-ended/suffix ranges.
    ranges: list[tuple[int, int | None]]
    def __init__(self, units: str, ranges: list[tuple[int, int | None]]) -> None: ...
    def range_for_length(self, length: int | None) -> tuple[int, int] | None: ...
    def make_content_range(self, length: int | None) -> ContentRange | None: ...
    def to_header(self) -> str: ...
    def to_content_range_header(self, length: int | None) -> str | None: ...

def _callback_property(name: str) -> property: ...

class ContentRange:
    on_update: Callable[[ContentRange], None] | None
    def __init__(
        self,
        units: str | None,
        start: int | None,
        stop: int | None,
        length: int | None = None,
        on_update: Callable[[ContentRange], None] | None = None,
    ) -> None: ...
    @property
    def units(self) -> str | None: ...
    @units.setter
    def units(self, value: str | None) -> None: ...
    @property
    def start(self) -> int | None: ...
    @start.setter
    def start(self, value: int | None) -> None: ...
    @property
    def stop(self) -> int | None: ...
    @stop.setter
    def stop(self, value: int | None) -> None: ...
    @property
    def length(self) -> int | None: ...
    @length.setter
    def length(self, value: int | None) -> None: ...
    def set(
        self,
        start: int | None,
        stop: int | None,
        length: int | None = None,
        units: str | None = "bytes",
    ) -> None: ...
    def unset(self) -> None: ...
    def to_header(self) -> str: ...
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/structures.py b/venv/lib/python3.8/site-packages/werkzeug/datastructures/structures.py
new file mode 100644
index 0000000..4279ceb
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/structures.py
@@ -0,0 +1,1010 @@
+from __future__ import annotations
+
+from collections.abc import MutableSet
+from copy import deepcopy
+
+from .. import exceptions
+from .._internal import _missing
+from .mixins import ImmutableDictMixin
+from .mixins import ImmutableListMixin
+from .mixins import ImmutableMultiDictMixin
+from .mixins import UpdateDictMixin
+
+
def is_immutable(self):
    """Raise ``TypeError`` reporting that *self* cannot be mutated.

    NOTE: duplicates ``mixins.is_immutable`` for module-local use.
    """
    message = f"{type(self).__name__!r} objects are immutable"
    raise TypeError(message)
+
+
def iter_multi_items(mapping):
    """Iterates over the items of a mapping yielding keys and values
    without dropping any from more complex structures.
    """
    if isinstance(mapping, MultiDict):
        # Preserve every value per key, not just the first.
        yield from mapping.items(multi=True)
    elif isinstance(mapping, dict):
        for key, value in mapping.items():
            if isinstance(value, (tuple, list)):
                # Fan a sequence value out into one pair per element.
                yield from ((key, v) for v in value)
            else:
                yield key, value
    else:
        # Assume an iterable of (key, value) pairs.
        yield from mapping
+
+
class ImmutableList(ImmutableListMixin, list):
    """An immutable :class:`list`.

    .. versionadded:: 0.5

    :private:
    """

    def __repr__(self):
        # Show the concrete class name wrapped around the plain list repr.
        return f"{type(self).__name__}({list.__repr__(self)})"
+
+
class TypeConversionDict(dict):
    """Works like a regular dict but the :meth:`get` method can perform
    type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
    are subclasses of this class and provide the same feature.

    .. versionadded:: 0.5
    """

    def get(self, key, default=None, type=None):
        """Return the default value if the requested data doesn't exist.
        If `type` is provided and is a callable it should convert the value,
        return it or raise a :exc:`ValueError` if that is not possible. In
        this case the function will return the default as if the value was not
        found:

        >>> d = TypeConversionDict(foo='42', bar='blub')
        >>> d.get('foo', type=int)
        42
        >>> d.get('bar', -1, type=int)
        -1

        :param key: The key to be looked up.
        :param default: The default value to be returned if the key can't
                        be looked up. If not further specified `None` is
                        returned.
        :param type: A callable that is used to cast the value in the
                     :class:`MultiDict`. If a :exc:`ValueError` or a
                     :exc:`TypeError` is raised by this callable the default
                     value is returned.

        .. versionchanged:: 3.0.2
           Returns the default value on :exc:`TypeError`, too.
        """
        try:
            value = self[key]
        except KeyError:
            return default

        if type is None:
            return value

        try:
            return type(value)
        except (ValueError, TypeError):
            # A failed conversion behaves like a missing key.
            return default
+
+
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
    """Works like a :class:`TypeConversionDict` but does not support
    modifications.

    .. versionadded:: 0.5
    """

    def copy(self):
        """Return a shallow mutable copy of this object. Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        # Deliberately returns the mutable base type, not another
        # immutable dict.
        return TypeConversionDict(self)

    def __copy__(self):
        # Immutable, so copy.copy() may return the object itself.
        return self
+
+
+class MultiDict(TypeConversionDict):
+ """A :class:`MultiDict` is a dictionary subclass customized to deal with
+ multiple values for the same key which is for example used by the parsing
+ functions in the wrappers. This is necessary because some HTML form
+ elements pass multiple values for the same key.
+
+ :class:`MultiDict` implements all standard dictionary methods.
+ Internally, it saves all values for a key as a list, but the standard dict
+ access methods will only return the first value for a key. If you want to
+ gain access to the other values, too, you have to use the `list` methods as
+ explained below.
+
+ Basic Usage:
+
+ >>> d = MultiDict([('a', 'b'), ('a', 'c')])
+ >>> d
+ MultiDict([('a', 'b'), ('a', 'c')])
+ >>> d['a']
+ 'b'
+ >>> d.getlist('a')
+ ['b', 'c']
+ >>> 'a' in d
+ True
+
+ It behaves like a normal dict thus all dict functions will only return the
+ first value when multiple values for one key are found.
+
+ From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
+ subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
+ render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
+ exceptions.
+
+ A :class:`MultiDict` can be constructed from an iterable of
+ ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
+ onwards some keyword parameters.
+
+ :param mapping: the initial value for the :class:`MultiDict`. Either a
+ regular dict, an iterable of ``(key, value)`` tuples
+ or `None`.
+ """
+
+ def __init__(self, mapping=None):
+ if isinstance(mapping, MultiDict):
+ dict.__init__(self, ((k, vs[:]) for k, vs in mapping.lists()))
+ elif isinstance(mapping, dict):
+ tmp = {}
+ for key, value in mapping.items():
+ if isinstance(value, (tuple, list)):
+ if len(value) == 0:
+ continue
+ value = list(value)
+ else:
+ value = [value]
+ tmp[key] = value
+ dict.__init__(self, tmp)
+ else:
+ tmp = {}
+ for key, value in mapping or ():
+ tmp.setdefault(key, []).append(value)
+ dict.__init__(self, tmp)
+
+ def __getstate__(self):
+ return dict(self.lists())
+
+ def __setstate__(self, value):
+ dict.clear(self)
+ dict.update(self, value)
+
+ def __iter__(self):
+ # Work around https://bugs.python.org/issue43246.
+ # (`return super().__iter__()` also works here, which makes this look
+ # even more like it should be a no-op, yet it isn't.)
+ return dict.__iter__(self)
+
+ def __getitem__(self, key):
+ """Return the first data value for this key;
+ raises KeyError if not found.
+
+ :param key: The key to be looked up.
+ :raise KeyError: if the key does not exist.
+ """
+
+ if key in self:
+ lst = dict.__getitem__(self, key)
+ if len(lst) > 0:
+ return lst[0]
+ raise exceptions.BadRequestKeyError(key)
+
+ def __setitem__(self, key, value):
+ """Like :meth:`add` but removes an existing key first.
+
+ :param key: the key for the value.
+ :param value: the value to set.
+ """
+ dict.__setitem__(self, key, [value])
+
+ def add(self, key, value):
+ """Adds a new value for the key.
+
+ .. versionadded:: 0.6
+
+ :param key: the key for the value.
+ :param value: the value to add.
+ """
+ dict.setdefault(self, key, []).append(value)
+
+ def getlist(self, key, type=None):
+ """Return the list of items for a given key. If that key is not in the
+ `MultiDict`, the return value will be an empty list. Just like `get`,
+ `getlist` accepts a `type` parameter. All items will be converted
+ with the callable defined there.
+
+ :param key: The key to be looked up.
+ :param type: A callable that is used to cast the value in the
+ :class:`MultiDict`. If a :exc:`ValueError` is raised
+ by this callable the value will be removed from the list.
+ :return: a :class:`list` of all the values for the key.
+ """
+ try:
+ rv = dict.__getitem__(self, key)
+ except KeyError:
+ return []
+ if type is None:
+ return list(rv)
+ result = []
+ for item in rv:
+ try:
+ result.append(type(item))
+ except ValueError:
+ pass
+ return result
+
+ def setlist(self, key, new_list):
+ """Remove the old values for a key and add new ones. Note that the list
+ you pass the values in will be shallow-copied before it is inserted in
+ the dictionary.
+
+ >>> d = MultiDict()
+ >>> d.setlist('foo', ['1', '2'])
+ >>> d['foo']
+ '1'
+ >>> d.getlist('foo')
+ ['1', '2']
+
+ :param key: The key for which the values are set.
+ :param new_list: An iterable with the new values for the key. Old values
+ are removed first.
+ """
+ dict.__setitem__(self, key, list(new_list))
+
+ def setdefault(self, key, default=None):
+ """Returns the value for the key if it is in the dict, otherwise it
+ returns `default` and sets that value for `key`.
+
+ :param key: The key to be looked up.
+ :param default: The default value to be returned if the key is not
+ in the dict. If not further specified it's `None`.
+ """
+ if key not in self:
+ self[key] = default
+ else:
+ default = self[key]
+ return default
+
+ def setlistdefault(self, key, default_list=None):
+ """Like `setdefault` but sets multiple values. The list returned
+ is not a copy, but the list that is actually used internally. This
+ means that you can put new values into the dict by appending items
+ to the list:
+
+ >>> d = MultiDict({"foo": 1})
+ >>> d.setlistdefault("foo").extend([2, 3])
+ >>> d.getlist("foo")
+ [1, 2, 3]
+
+ :param key: The key to be looked up.
+ :param default_list: An iterable of default values. It is either copied
+ (in case it was a list) or converted into a list
+ before returned.
+ :return: a :class:`list`
+ """
+ if key not in self:
+ default_list = list(default_list or ())
+ dict.__setitem__(self, key, default_list)
+ else:
+ default_list = dict.__getitem__(self, key)
+ return default_list
+
+ def items(self, multi=False):
+ """Return an iterator of ``(key, value)`` pairs.
+
+ :param multi: If set to `True` the iterator returned will have a pair
+ for each value of each key. Otherwise it will only
+ contain pairs for the first value of each key.
+ """
+ for key, values in dict.items(self):
+ if multi:
+ for value in values:
+ yield key, value
+ else:
+ yield key, values[0]
+
+ def lists(self):
+ """Return a iterator of ``(key, values)`` pairs, where values is the list
+ of all values associated with the key."""
+ for key, values in dict.items(self):
+ yield key, list(values)
+
+ def values(self):
+ """Returns an iterator of the first value on every key's value list."""
+ for values in dict.values(self):
+ yield values[0]
+
+ def listvalues(self):
+ """Return an iterator of all values associated with a key. Zipping
+ :meth:`keys` and this is the same as calling :meth:`lists`:
+
+ >>> d = MultiDict({"foo": [1, 2, 3]})
+ >>> zip(d.keys(), d.listvalues()) == d.lists()
+ True
+ """
+ return dict.values(self)
+
+ def copy(self):
+ """Return a shallow copy of this object."""
+ return self.__class__(self)
+
+ def deepcopy(self, memo=None):
+ """Return a deep copy of this object."""
+ return self.__class__(deepcopy(self.to_dict(flat=False), memo))
+
+ def to_dict(self, flat=True):
+ """Return the contents as regular dict. If `flat` is `True` the
+ returned dict will only have the first item present, if `flat` is
+ `False` all values will be returned as lists.
+
+ :param flat: If set to `False` the dict returned will have lists
+ with all the values in it. Otherwise it will only
+ contain the first value for each key.
+ :return: a :class:`dict`
+ """
+ if flat:
+ return dict(self.items())
+ return dict(self.lists())
+
+ def update(self, mapping):
+ """update() extends rather than replaces existing key lists:
+
+ >>> a = MultiDict({'x': 1})
+ >>> b = MultiDict({'x': 2, 'y': 3})
+ >>> a.update(b)
+ >>> a
+ MultiDict([('y', 3), ('x', 1), ('x', 2)])
+
+ If the value list for a key in ``other_dict`` is empty, no new values
+ will be added to the dict and the key will not be created:
+
+ >>> x = {'empty_list': []}
+ >>> y = MultiDict()
+ >>> y.update(x)
+ >>> y
+ MultiDict([])
+ """
+ for key, value in iter_multi_items(mapping):
+ MultiDict.add(self, key, value)
+
    def pop(self, key, default=_missing):
        """Pop the first item for a list on the dict. Afterwards the
        key is removed from the dict, so additional values are discarded:

        >>> d = MultiDict({"foo": [1, 2, 3]})
        >>> d.pop("foo")
        1
        >>> "foo" in d
        False

        :param key: the key to pop.
        :param default: if provided the value to return if the key was
                        not in the dictionary.
        """
        try:
            lst = dict.pop(self, key)

            # NOTE: BadRequestKeyError subclasses KeyError, so an empty
            # value list raised here is caught by the handler below and
            # honors ``default`` just like a missing key would.
            if len(lst) == 0:
                raise exceptions.BadRequestKeyError(key)

            return lst[0]
        except KeyError:
            if default is not _missing:
                return default

            # Re-raise as the HTTP-aware error (renders 400 when caught by
            # a catch-all HTTP exception handler); suppress the chained
            # KeyError context.
            raise exceptions.BadRequestKeyError(key) from None
+
    def popitem(self):
        """Pop an item from the dict, returning ``(key, first_value)``.

        :raise exceptions.BadRequestKeyError: if the dict is empty or the
            popped key had an empty value list.
        """
        try:
            item = dict.popitem(self)

            # An empty value list is treated like a missing key; this
            # BadRequestKeyError (a KeyError subclass) is re-raised below.
            if len(item[1]) == 0:
                raise exceptions.BadRequestKeyError(item[0])

            return (item[0], item[1][0])
        except KeyError as e:
            raise exceptions.BadRequestKeyError(e.args[0]) from None
+
+ def poplist(self, key):
+ """Pop the list for a key from the dict. If the key is not in the dict
+ an empty list is returned.
+
+ .. versionchanged:: 0.5
+ If the key does no longer exist a list is returned instead of
+ raising an error.
+ """
+ return dict.pop(self, key, [])
+
    def popitemlist(self):
        """Pop a ``(key, list)`` tuple from the dict.

        :raise exceptions.BadRequestKeyError: if the dict is empty.
        """
        try:
            return dict.popitem(self)
        except KeyError as e:
            # Translate into the HTTP-aware 400 error; drop the chained
            # KeyError context.
            raise exceptions.BadRequestKeyError(e.args[0]) from None
+
    def __copy__(self):
        # Support copy.copy() by delegating to the public copy() method.
        return self.copy()
+
    def __deepcopy__(self, memo):
        # Support copy.deepcopy() by delegating to the public deepcopy().
        return self.deepcopy(memo=memo)
+
    def __repr__(self):
        # Render every (key, value) pair (multi=True) so repeated keys are
        # visible in the repr.
        return f"{type(self).__name__}({list(self.items(multi=True))!r})"
+
+
class _omd_bucket:
    """Wraps values in the :class:`OrderedMultiDict`. This makes it
    possible to keep an order over multiple different keys. It requires
    a lot of extra memory and slows down access a lot, but makes it
    possible to access elements in O(1) and iterate in O(n).

    Buckets form a doubly linked list threaded through the whole
    OrderedMultiDict; the dict maps each key to its list of buckets.
    """

    # __slots__ keeps per-bucket memory small; one bucket exists per value.
    __slots__ = ("prev", "key", "value", "next")

    def __init__(self, omd, key, value):
        # Append this bucket at the tail of the omd's linked list.
        self.prev = omd._last_bucket
        self.key = key
        self.value = value
        self.next = None

        if omd._first_bucket is None:
            omd._first_bucket = self
        if omd._last_bucket is not None:
            omd._last_bucket.next = self
        omd._last_bucket = self

    def unlink(self, omd):
        # Splice this bucket out of the linked list, fixing up the
        # neighbors and the omd's head/tail pointers as needed.
        if self.prev:
            self.prev.next = self.next
        if self.next:
            self.next.prev = self.prev
        if omd._first_bucket is self:
            omd._first_bucket = self.next
        if omd._last_bucket is self:
            omd._last_bucket = self.prev
+
+
class OrderedMultiDict(MultiDict):
    """Works like a regular :class:`MultiDict` but preserves the
    order of the fields. To convert the ordered multi dict into a
    list you can use the :meth:`items` method and pass it ``multi=True``.

    In general an :class:`OrderedMultiDict` is an order of magnitude
    slower than a :class:`MultiDict`.

    .. admonition:: note

        Due to a limitation in Python you cannot convert an ordered
        multi dict into a regular dict by using ``dict(multidict)``.
        Instead you have to use the :meth:`to_dict` method, otherwise
        the internal bucket objects are exposed.

    Internally the dict maps each key to a list of :class:`_omd_bucket`
    objects, and the buckets form a doubly linked list that records the
    global insertion order across all keys.
    """

    def __init__(self, mapping=None):
        dict.__init__(self)
        # Head/tail of the bucket linked list; empty dict has neither.
        self._first_bucket = self._last_bucket = None
        if mapping is not None:
            OrderedMultiDict.update(self, mapping)

    def __eq__(self, other):
        if not isinstance(other, MultiDict):
            return NotImplemented
        if isinstance(other, OrderedMultiDict):
            # Order matters between two ordered dicts: compare the full
            # (key, value) streams pairwise and require equal length.
            iter1 = iter(self.items(multi=True))
            iter2 = iter(other.items(multi=True))
            try:
                for k1, v1 in iter1:
                    k2, v2 = next(iter2)
                    if k1 != k2 or v1 != v2:
                        return False
            except StopIteration:
                # other ran out first -> different lengths.
                return False
            try:
                next(iter2)
            except StopIteration:
                return True
            # other still has items left -> different lengths.
            return False
        # Against a plain MultiDict only per-key value lists are compared.
        if len(self) != len(other):
            return False
        for key, values in self.lists():
            if other.getlist(key) != values:
                return False
        return True

    # Mutable, so not hashable.
    __hash__ = None

    def __reduce_ex__(self, protocol):
        # Pickle as the flat (key, value) pair list to avoid exposing
        # the internal bucket objects.
        return type(self), (list(self.items(multi=True)),)

    def __getstate__(self):
        return list(self.items(multi=True))

    def __setstate__(self, values):
        dict.clear(self)
        for key, value in values:
            self.add(key, value)

    def __getitem__(self, key):
        if key in self:
            # First bucket for the key holds the first value.
            return dict.__getitem__(self, key)[0].value
        raise exceptions.BadRequestKeyError(key)

    def __setitem__(self, key, value):
        # Replace: drop all existing buckets for the key, then append one.
        self.poplist(key)
        self.add(key, value)

    def __delitem__(self, key):
        self.pop(key)

    def keys(self):
        return (key for key, value in self.items())

    def __iter__(self):
        return iter(self.keys())

    def values(self):
        return (value for key, value in self.items())

    def items(self, multi=False):
        # Walk the bucket linked list so insertion order is preserved;
        # without multi, only a key's first occurrence is yielded.
        ptr = self._first_bucket
        if multi:
            while ptr is not None:
                yield ptr.key, ptr.value
                ptr = ptr.next
        else:
            returned_keys = set()
            while ptr is not None:
                if ptr.key not in returned_keys:
                    returned_keys.add(ptr.key)
                    yield ptr.key, ptr.value
                ptr = ptr.next

    def lists(self):
        # Keys appear in order of their first occurrence, each paired with
        # its full value list.
        returned_keys = set()
        ptr = self._first_bucket
        while ptr is not None:
            if ptr.key not in returned_keys:
                yield ptr.key, self.getlist(ptr.key)
                returned_keys.add(ptr.key)
            ptr = ptr.next

    def listvalues(self):
        for _key, values in self.lists():
            yield values

    def add(self, key, value):
        # _omd_bucket's constructor links the bucket into the list.
        dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))

    def getlist(self, key, type=None):
        try:
            rv = dict.__getitem__(self, key)
        except KeyError:
            return []
        if type is None:
            return [x.value for x in rv]
        # With a converter, silently drop values it rejects (ValueError).
        result = []
        for item in rv:
            try:
                result.append(type(item.value))
            except ValueError:
                pass
        return result

    def setlist(self, key, new_list):
        self.poplist(key)
        for value in new_list:
            self.add(key, value)

    def setlistdefault(self, key, default_list=None):
        # MultiDict.setlistdefault returns the mutable internal list; here
        # the internal list holds buckets, so that contract can't be met.
        raise TypeError("setlistdefault is unsupported for ordered multi dicts")

    def update(self, mapping):
        for key, value in iter_multi_items(mapping):
            OrderedMultiDict.add(self, key, value)

    def poplist(self, key):
        # Remove the key and unlink all of its buckets; missing key gives
        # an empty result.
        buckets = dict.pop(self, key, ())
        for bucket in buckets:
            bucket.unlink(self)
        return [x.value for x in buckets]

    def pop(self, key, default=_missing):
        try:
            buckets = dict.pop(self, key)
        except KeyError:
            if default is not _missing:
                return default

            raise exceptions.BadRequestKeyError(key) from None

        # All values for the key are discarded; only the first is returned.
        for bucket in buckets:
            bucket.unlink(self)

        return buckets[0].value

    def popitem(self):
        try:
            key, buckets = dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(e.args[0]) from None

        for bucket in buckets:
            bucket.unlink(self)

        return key, buckets[0].value

    def popitemlist(self):
        try:
            key, buckets = dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(e.args[0]) from None

        for bucket in buckets:
            bucket.unlink(self)

        return key, [x.value for x in buckets]
+
+
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
    """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
    instances as sequence and it will combine the return values of all wrapped
    dicts:

    >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
    >>> post = MultiDict([('foo', 'bar')])
    >>> get = MultiDict([('blub', 'blah')])
    >>> combined = CombinedMultiDict([get, post])
    >>> combined['foo']
    'bar'
    >>> combined['blub']
    'blah'

    This works for all read operations and will raise a `TypeError` for
    methods that usually change data which isn't possible.

    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
    exceptions.
    """

    def __reduce_ex__(self, protocol):
        # Pickle by re-wrapping the same list of underlying dicts.
        return type(self), (self.dicts,)

    def __init__(self, dicts=None):
        # BUGFIX: ``list(dicts) or []`` raised TypeError when called with
        # the documented default ``dicts=None``; treat None as "no dicts".
        self.dicts = list(dicts) if dicts is not None else []

    @classmethod
    def fromkeys(cls, keys, value=None):
        raise TypeError(f"cannot create {cls.__name__!r} instances by fromkeys")

    def __getitem__(self, key):
        # Earlier dicts shadow later ones.
        for d in self.dicts:
            if key in d:
                return d[key]
        raise exceptions.BadRequestKeyError(key)

    def get(self, key, default=None, type=None):
        """Return the first value for ``key`` across the wrapped dicts,
        optionally converted with ``type``. A dict whose value the
        converter rejects (ValueError) is skipped, letting a later dict
        provide the value.
        """
        for d in self.dicts:
            if key in d:
                if type is not None:
                    try:
                        return type(d[key])
                    except ValueError:
                        continue
                return d[key]
        return default

    def getlist(self, key, type=None):
        """Return all values for ``key``, concatenated across all wrapped
        dicts in order."""
        rv = []
        for d in self.dicts:
            rv.extend(d.getlist(key, type))
        return rv

    def _keys_impl(self):
        """This function exists so __len__ can be implemented more efficiently,
        saving one list creation from an iterator.
        """
        rv = set()
        rv.update(*self.dicts)
        return rv

    def keys(self):
        return self._keys_impl()

    def __iter__(self):
        return iter(self.keys())

    def items(self, multi=False):
        # Without multi, only the first occurrence of each key is yielded,
        # matching __getitem__'s shadowing order.
        found = set()
        for d in self.dicts:
            for key, value in d.items(multi):
                if multi:
                    yield key, value
                elif key not in found:
                    found.add(key)
                    yield key, value

    def values(self):
        for _key, value in self.items():
            yield value

    def lists(self):
        # Merge per-key value lists across all wrapped dicts.
        rv = {}
        for d in self.dicts:
            for key, values in d.lists():
                rv.setdefault(key, []).extend(values)
        return list(rv.items())

    def listvalues(self):
        return (x[1] for x in self.lists())

    def copy(self):
        """Return a shallow mutable copy of this object.

        This returns a :class:`MultiDict` representing the data at the
        time of copying. The copy will no longer reflect changes to the
        wrapped dicts.

        .. versionchanged:: 0.15
            Return a mutable :class:`MultiDict`.
        """
        return MultiDict(self)

    def to_dict(self, flat=True):
        """Return the contents as regular dict. If `flat` is `True` the
        returned dict will only have the first item present, if `flat` is
        `False` all values will be returned as lists.

        :param flat: If set to `False` the dict returned will have lists
                     with all the values in it. Otherwise it will only
                     contain the first item for each key.
        :return: a :class:`dict`
        """
        if flat:
            return dict(self.items())

        return dict(self.lists())

    def __len__(self):
        return len(self._keys_impl())

    def __contains__(self, key):
        for d in self.dicts:
            if key in d:
                return True
        return False

    def __repr__(self):
        return f"{type(self).__name__}({self.dicts!r})"
+
+
class ImmutableDict(ImmutableDictMixin, dict):
    """An immutable :class:`dict`.

    .. versionadded:: 0.5
    """

    def __repr__(self):
        body = dict.__repr__(self)
        return f"{type(self).__name__}({body})"

    def copy(self):
        """Return a shallow mutable copy of this object. Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return dict(self)

    def __copy__(self):
        # copy.copy() hands back the same immutable instance.
        return self
+
+
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
    """An immutable :class:`MultiDict`.

    .. versionadded:: 0.5
    """

    def copy(self):
        """Return a shallow mutable copy of this object. Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        mutable = MultiDict(self)
        return mutable

    def __copy__(self):
        # copy.copy() hands back the same immutable instance.
        return self
+
+
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
    """An immutable :class:`OrderedMultiDict`.

    .. versionadded:: 0.6
    """

    def _iter_hashitems(self):
        # enumerate() folds each pair's position into the hash input, so
        # the same pairs in a different order hash differently.
        return enumerate(self.items(multi=True))

    def copy(self):
        """Return a shallow mutable copy of this object. Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        mutable = OrderedMultiDict(self)
        return mutable

    def __copy__(self):
        # copy.copy() hands back the same immutable instance.
        return self
+
+
class CallbackDict(UpdateDictMixin, dict):
    """A dict that invokes a callback every time it is modified. The
    callback receives the dict instance as its only argument.
    """

    def __init__(self, initial=None, on_update=None):
        dict.__init__(self, initial or ())
        self.on_update = on_update

    def __repr__(self):
        cls_name = type(self).__name__
        return f"<{cls_name} {dict.__repr__(self)}>"
+
+
class HeaderSet(MutableSet):
    """Similar to the :class:`ETags` class this implements a set-like structure.
    Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
    content-language headers.

    If not constructed using the :func:`parse_set_header` function the
    instantiation works like this:

    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
    >>> hs
    HeaderSet(['foo', 'bar', 'baz'])

    Internally ``_headers`` keeps the original-cased values in order and
    ``_set`` keeps their lowercased forms for O(1) membership tests; the
    two must stay in sync.
    """

    def __init__(self, headers=None, on_update=None):
        self._headers = list(headers or ())
        self._set = {x.lower() for x in self._headers}
        self.on_update = on_update

    def add(self, header):
        """Add a new header to the set."""
        self.update((header,))

    def remove(self, header):
        """Remove a header from the set. This raises an :exc:`KeyError` if the
        header is not in the set.

        .. versionchanged:: 0.5
            In older versions a :exc:`IndexError` was raised instead of a
            :exc:`KeyError` if the object was missing.

        :param header: the header to be removed.
        """
        key = header.lower()
        if key not in self._set:
            raise KeyError(header)
        self._set.remove(key)
        # BUGFIX: compare against the lowered key, not the original-cased
        # argument. The old code compared ``stored.lower() == header``, so
        # removing a mixed-case header (e.g. "Foo") deleted it from _set
        # but left it in _headers, desynchronizing the two structures.
        for idx, stored in enumerate(self._headers):
            if stored.lower() == key:
                del self._headers[idx]
                break
        if self.on_update is not None:
            self.on_update(self)

    def update(self, iterable):
        """Add all the headers from the iterable to the set.

        :param iterable: updates the set with the items from the iterable.
        """
        # Only notify on_update when something actually changed.
        inserted_any = False
        for header in iterable:
            key = header.lower()
            if key not in self._set:
                self._headers.append(header)
                self._set.add(key)
                inserted_any = True
        if inserted_any and self.on_update is not None:
            self.on_update(self)

    def discard(self, header):
        """Like :meth:`remove` but ignores errors.

        :param header: the header to be discarded.
        """
        try:
            self.remove(header)
        except KeyError:
            pass

    def find(self, header):
        """Return the index of the header in the set or return -1 if not found.

        :param header: the header to be looked up.
        """
        header = header.lower()
        for idx, item in enumerate(self._headers):
            if item.lower() == header:
                return idx
        return -1

    def index(self, header):
        """Return the index of the header in the set or raise an
        :exc:`IndexError`.

        :param header: the header to be looked up.
        """
        rv = self.find(header)
        if rv < 0:
            raise IndexError(header)
        return rv

    def clear(self):
        """Clear the set."""
        self._set.clear()
        del self._headers[:]
        if self.on_update is not None:
            self.on_update(self)

    def as_set(self, preserve_casing=False):
        """Return the set as real python set type. When calling this, all
        the items are converted to lowercase and the ordering is lost.

        :param preserve_casing: if set to `True` the items in the set returned
                                will have the original case like in the
                                :class:`HeaderSet`, otherwise they will
                                be lowercase.
        """
        if preserve_casing:
            return set(self._headers)
        return set(self._set)

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        return ", ".join(map(http.quote_header_value, self._headers))

    def __getitem__(self, idx):
        return self._headers[idx]

    def __delitem__(self, idx):
        rv = self._headers.pop(idx)
        self._set.remove(rv.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __setitem__(self, idx, value):
        # Keep _set in sync: drop the replaced entry, add the new one.
        old = self._headers[idx]
        self._set.remove(old.lower())
        self._headers[idx] = value
        self._set.add(value.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __contains__(self, header):
        return header.lower() in self._set

    def __len__(self):
        return len(self._set)

    def __iter__(self):
        # Iterate in insertion order with original casing.
        return iter(self._headers)

    def __bool__(self):
        return bool(self._set)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return f"{type(self).__name__}({self._headers!r})"
+
+
+# circular dependencies
+from .. import http
diff --git a/venv/lib/python3.8/site-packages/werkzeug/datastructures/structures.pyi b/venv/lib/python3.8/site-packages/werkzeug/datastructures/structures.pyi
new file mode 100644
index 0000000..7086dda
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/datastructures/structures.pyi
@@ -0,0 +1,206 @@
+from collections.abc import Callable
+from collections.abc import Iterable
+from collections.abc import Iterator
+from collections.abc import Mapping
+from typing import Any
+from typing import Generic
+from typing import Literal
+from typing import NoReturn
+from typing import overload
+from typing import TypeVar
+
+from .mixins import ImmutableDictMixin
+from .mixins import ImmutableListMixin
+from .mixins import ImmutableMultiDictMixin
+from .mixins import UpdateDictMixin
+
+D = TypeVar("D")
+K = TypeVar("K")
+T = TypeVar("T")
+V = TypeVar("V")
+_CD = TypeVar("_CD", bound="CallbackDict[Any, Any]")
+
+def is_immutable(self: object) -> NoReturn: ...
# Flattens a mapping (possibly with iterable values) or pair iterable
# into individual (key, value) pairs.
def iter_multi_items(
    mapping: Mapping[K, V | Iterable[V]] | Iterable[tuple[K, V]],
) -> Iterator[tuple[K, V]]: ...
+
+class ImmutableList(ImmutableListMixin[V]): ...
+
class TypeConversionDict(dict[K, V]):
    # get() overloads: the optional ``type`` callable converts the looked-up
    # value; conversion failure falls back to ``default``.
    @overload
    def get(self, key: K, default: None = ..., type: None = ...) -> V | None: ...
    @overload
    def get(self, key: K, default: D, type: None = ...) -> D | V: ...
    @overload
    def get(self, key: K, default: D, type: Callable[[V], T]) -> D | T: ...
    @overload
    def get(self, key: K, type: Callable[[V], T]) -> T | None: ...
+
class ImmutableTypeConversionDict(ImmutableDictMixin[K, V], TypeConversionDict[K, V]):
    # copy() returns a mutable dict; __copy__ returns self (immutable).
    def copy(self) -> TypeConversionDict[K, V]: ...
    def __copy__(self) -> ImmutableTypeConversionDict[K, V]: ...
+
# Stub for the runtime MultiDict: each key maps to a list of values; the
# plain mapping API exposes the first value, the *list* methods the rest.
class MultiDict(TypeConversionDict[K, V]):
    def __init__(
        self,
        mapping: Mapping[K, Iterable[V] | V] | Iterable[tuple[K, V]] | None = None,
    ) -> None: ...
    def __getitem__(self, item: K) -> V: ...
    def __setitem__(self, key: K, value: V) -> None: ...
    def add(self, key: K, value: V) -> None: ...
    @overload
    def getlist(self, key: K) -> list[V]: ...
    @overload
    def getlist(self, key: K, type: Callable[[V], T] = ...) -> list[T]: ...
    def setlist(self, key: K, new_list: Iterable[V]) -> None: ...
    def setdefault(self, key: K, default: V | None = None) -> V: ...
    def setlistdefault(
        self, key: K, default_list: Iterable[V] | None = None
    ) -> list[V]: ...
    # Iteration methods narrow dict's views to iterators, hence the ignores.
    def items(self, multi: bool = False) -> Iterator[tuple[K, V]]: ...  # type: ignore
    def lists(self) -> Iterator[tuple[K, list[V]]]: ...
    def values(self) -> Iterator[V]: ...  # type: ignore
    def listvalues(self) -> Iterator[list[V]]: ...
    def copy(self) -> MultiDict[K, V]: ...
    def deepcopy(self, memo: Any = None) -> MultiDict[K, V]: ...
    @overload
    def to_dict(self) -> dict[K, V]: ...
    @overload
    def to_dict(self, flat: Literal[False]) -> dict[K, list[V]]: ...
    def update(  # type: ignore
        self, mapping: Mapping[K, Iterable[V] | V] | Iterable[tuple[K, V]]
    ) -> None: ...
    @overload
    def pop(self, key: K) -> V: ...
    @overload
    def pop(self, key: K, default: V | T = ...) -> V | T: ...
    def popitem(self) -> tuple[K, V]: ...
    def poplist(self, key: K) -> list[V]: ...
    def popitemlist(self) -> tuple[K, list[V]]: ...
    def __copy__(self) -> MultiDict[K, V]: ...
    def __deepcopy__(self, memo: Any) -> MultiDict[K, V]: ...
+
# Stub for the doubly linked bucket node used by OrderedMultiDict.
class _omd_bucket(Generic[K, V]):
    prev: _omd_bucket[K, V] | None
    next: _omd_bucket[K, V] | None
    key: K
    value: V
    def __init__(self, omd: OrderedMultiDict[K, V], key: K, value: V) -> None: ...
    def unlink(self, omd: OrderedMultiDict[K, V]) -> None: ...
+
# Stub for the order-preserving MultiDict backed by a bucket linked list.
class OrderedMultiDict(MultiDict[K, V]):
    _first_bucket: _omd_bucket[K, V] | None
    _last_bucket: _omd_bucket[K, V] | None
    def __init__(self, mapping: Mapping[K, V] | None = None) -> None: ...
    def __eq__(self, other: object) -> bool: ...
    def __getitem__(self, key: K) -> V: ...
    def __setitem__(self, key: K, value: V) -> None: ...
    def __delitem__(self, key: K) -> None: ...
    def keys(self) -> Iterator[K]: ...  # type: ignore
    def __iter__(self) -> Iterator[K]: ...
    def values(self) -> Iterator[V]: ...  # type: ignore
    def items(self, multi: bool = False) -> Iterator[tuple[K, V]]: ...  # type: ignore
    def lists(self) -> Iterator[tuple[K, list[V]]]: ...
    def listvalues(self) -> Iterator[list[V]]: ...
    def add(self, key: K, value: V) -> None: ...
    @overload
    def getlist(self, key: K) -> list[V]: ...
    @overload
    def getlist(self, key: K, type: Callable[[V], T] = ...) -> list[T]: ...
    def setlist(self, key: K, new_list: Iterable[V]) -> None: ...
    # Runtime always raises TypeError for this method.
    def setlistdefault(
        self, key: K, default_list: Iterable[V] | None = None
    ) -> list[V]: ...
    def update(  # type: ignore
        self, mapping: Mapping[K, V] | Iterable[tuple[K, V]]
    ) -> None: ...
    def poplist(self, key: K) -> list[V]: ...
    @overload
    def pop(self, key: K) -> V: ...
    @overload
    def pop(self, key: K, default: V | T = ...) -> V | T: ...
    def popitem(self) -> tuple[K, V]: ...
    def popitemlist(self) -> tuple[K, list[V]]: ...
+
# Stub for the read-only view over several MultiDicts.
class CombinedMultiDict(ImmutableMultiDictMixin[K, V], MultiDict[K, V]):  # type: ignore
    dicts: list[MultiDict[K, V]]
    def __init__(self, dicts: Iterable[MultiDict[K, V]] | None) -> None: ...
    @classmethod
    def fromkeys(cls, keys: Any, value: Any = None) -> NoReturn: ...
    def __getitem__(self, key: K) -> V: ...
    @overload  # type: ignore
    def get(self, key: K) -> V | None: ...
    @overload
    def get(self, key: K, default: V | T = ...) -> V | T: ...
    @overload
    def get(
        self, key: K, default: T | None = None, type: Callable[[V], T] = ...
    ) -> T | None: ...
    @overload
    def getlist(self, key: K) -> list[V]: ...
    @overload
    def getlist(self, key: K, type: Callable[[V], T] = ...) -> list[T]: ...
    def _keys_impl(self) -> set[K]: ...
    def keys(self) -> set[K]: ...  # type: ignore
    # BUGFIX: the runtime __iter__ returns iter(self.keys()) — an iterator,
    # not a set; annotating it as Iterator[K] also satisfies the iterator
    # protocol, so no type: ignore is needed.
    def __iter__(self) -> Iterator[K]: ...
    def items(self, multi: bool = False) -> Iterator[tuple[K, V]]: ...  # type: ignore
    def values(self) -> Iterator[V]: ...  # type: ignore
    def lists(self) -> Iterator[tuple[K, list[V]]]: ...
    def listvalues(self) -> Iterator[list[V]]: ...
    def copy(self) -> MultiDict[K, V]: ...
    @overload
    def to_dict(self) -> dict[K, V]: ...
    @overload
    def to_dict(self, flat: Literal[False]) -> dict[K, list[V]]: ...
    def __contains__(self, key: K) -> bool: ...  # type: ignore
    def has_key(self, key: K) -> bool: ...
+
class ImmutableDict(ImmutableDictMixin[K, V], dict[K, V]):
    # copy() returns a mutable dict; __copy__ returns self (immutable).
    def copy(self) -> dict[K, V]: ...
    def __copy__(self) -> ImmutableDict[K, V]: ...
+
class ImmutableMultiDict(  # type: ignore
    ImmutableMultiDictMixin[K, V], MultiDict[K, V]
):
    # copy() returns a mutable MultiDict; __copy__ returns self.
    def copy(self) -> MultiDict[K, V]: ...
    def __copy__(self) -> ImmutableMultiDict[K, V]: ...
+
class ImmutableOrderedMultiDict(  # type: ignore
    ImmutableMultiDictMixin[K, V], OrderedMultiDict[K, V]
):
    # Hash input pairs each item with its position (enumerate at runtime).
    def _iter_hashitems(self) -> Iterator[tuple[int, tuple[K, V]]]: ...
    def copy(self) -> OrderedMultiDict[K, V]: ...
    def __copy__(self) -> ImmutableOrderedMultiDict[K, V]: ...
+
class CallbackDict(UpdateDictMixin[K, V], dict[K, V]):
    # on_update receives the dict instance itself on every mutation.
    def __init__(
        self,
        initial: Mapping[K, V] | Iterable[tuple[K, V]] | None = None,
        on_update: Callable[[_CD], None] | None = None,
    ) -> None: ...
+
# Stub for the case-insensitive, order-preserving header set.
class HeaderSet(set[str]):
    _headers: list[str]
    _set: set[str]
    on_update: Callable[[HeaderSet], None] | None
    def __init__(
        self,
        headers: Iterable[str] | None = None,
        on_update: Callable[[HeaderSet], None] | None = None,
    ) -> None: ...
    def add(self, header: str) -> None: ...
    def remove(self, header: str) -> None: ...
    def update(self, iterable: Iterable[str]) -> None: ...  # type: ignore
    def discard(self, header: str) -> None: ...
    def find(self, header: str) -> int: ...
    def index(self, header: str) -> int: ...
    def clear(self) -> None: ...
    def as_set(self, preserve_casing: bool = False) -> set[str]: ...
    def to_header(self) -> str: ...
    def __getitem__(self, idx: int) -> str: ...
    def __delitem__(self, idx: int) -> None: ...
    def __setitem__(self, idx: int, value: str) -> None: ...
    def __contains__(self, header: str) -> bool: ...  # type: ignore
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[str]: ...
diff --git a/venv/lib/python3.8/site-packages/werkzeug/debug/__init__.py b/venv/lib/python3.8/site-packages/werkzeug/debug/__init__.py
new file mode 100644
index 0000000..0c4cabd
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/debug/__init__.py
@@ -0,0 +1,565 @@
+from __future__ import annotations
+
+import getpass
+import hashlib
+import json
+import os
+import pkgutil
+import re
+import sys
+import time
+import typing as t
+import uuid
+from contextlib import ExitStack
+from io import BytesIO
+from itertools import chain
+from multiprocessing import Value
+from os.path import basename
+from os.path import join
+from zlib import adler32
+
+from .._internal import _log
+from ..exceptions import NotFound
+from ..exceptions import SecurityError
+from ..http import parse_cookie
+from ..sansio.utils import host_is_trusted
+from ..security import gen_salt
+from ..utils import send_file
+from ..wrappers.request import Request
+from ..wrappers.response import Response
+from .console import Console
+from .tbtools import DebugFrameSummary
+from .tbtools import DebugTraceback
+from .tbtools import render_console_html
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+# A week
+PIN_TIME = 60 * 60 * 24 * 7
+
+
def hash_pin(pin: str) -> str:
    """Return the first 12 hex chars of the salted SHA-1 of the debugger
    PIN, used when comparing the remembered-auth cookie value."""
    salted = f"{pin} added salt".encode("utf-8", "replace")
    return hashlib.sha1(salted).hexdigest()[:12]
+
+
+_machine_id: str | bytes | None = None
+
+
def get_machine_id() -> str | bytes | None:
    """Return a stable per-machine identifier, or ``None`` if none can be
    determined. The result is computed once and cached in the module-level
    ``_machine_id``. Used to derive the debugger PIN and cookie name.
    """
    global _machine_id

    # Cached from a previous call.
    if _machine_id is not None:
        return _machine_id

    def _generate() -> str | bytes | None:
        linux = b""

        # machine-id is stable across boots, boot_id is not.
        for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
            try:
                with open(filename, "rb") as f:
                    value = f.readline().strip()
            except OSError:
                continue

            if value:
                linux += value
                break

        # Containers share the same machine id, add some cgroup
        # information. This is used outside containers too but should be
        # relatively stable across boots.
        try:
            with open("/proc/self/cgroup", "rb") as f:
                linux += f.readline().strip().rpartition(b"/")[2]
        except OSError:
            pass

        if linux:
            return linux

        # On OS X, use ioreg to get the computer's serial number.
        try:
            # subprocess may not be available, e.g. Google App Engine
            # https://github.com/pallets/werkzeug/issues/925
            from subprocess import PIPE
            from subprocess import Popen

            dump = Popen(
                ["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
            ).communicate()[0]
            match = re.search(b'"serial-number" = <([^>]+)', dump)

            if match is not None:
                return match.group(1)
        except (OSError, ImportError):
            pass

        # On Windows, use winreg to get the machine guid.
        if sys.platform == "win32":
            import winreg

            try:
                with winreg.OpenKey(
                    winreg.HKEY_LOCAL_MACHINE,
                    "SOFTWARE\\Microsoft\\Cryptography",
                    0,
                    winreg.KEY_READ | winreg.KEY_WOW64_64KEY,
                ) as rk:
                    guid: str | bytes
                    guid_type: int
                    guid, guid_type = winreg.QueryValueEx(rk, "MachineGuid")

                    # Normalize REG_SZ strings to bytes for consistency.
                    if guid_type == winreg.REG_SZ:
                        return guid.encode()

                    return guid
            except OSError:
                pass

        # No identity source worked on this platform.
        return None

    _machine_id = _generate()
    return _machine_id
+
+
+class _ConsoleFrame:
+ """Helper class so that we can reuse the frame console code for the
+ standalone console.
+ """
+
+ def __init__(self, namespace: dict[str, t.Any]):
+ self.console = Console(namespace)
+ self.id = 0
+
+ def eval(self, code: str) -> t.Any:
+ return self.console.eval(code)
+
+
def get_pin_and_cookie_name(
    app: WSGIApplication,
) -> tuple[str, str] | tuple[None, None]:
    """Given an application object this returns a semi-stable 9 digit pin
    code and a random key. The hope is that this is stable between
    restarts to not make debugging particularly frustrating. If the pin
    was forcefully disabled this returns `None`.

    Second item in the resulting tuple is the cookie name for remembering.
    """
    pin = os.environ.get("WERKZEUG_DEBUG_PIN")
    rv = None
    num = None

    # Pin was explicitly disabled
    if pin == "off":
        return None, None

    # Pin was provided explicitly
    if pin is not None and pin.replace("-", "").isdecimal():
        # If there are separators in the pin, return it directly
        if "-" in pin:
            rv = pin
        else:
            num = pin

    # Module name of the app (falls back to the class's module for
    # app objects without __module__).
    modname = getattr(app, "__module__", t.cast(object, app).__class__.__module__)
    username: str | None

    try:
        # getuser imports the pwd module, which does not exist in Google
        # App Engine. It may also raise a KeyError if the UID does not
        # have a username, such as in Docker.
        username = getpass.getuser()
    # Python >= 3.13 only raises OSError
    except (ImportError, KeyError, OSError):
        username = None

    mod = sys.modules.get(modname)

    # This information only exists to make the cookie unique on the
    # computer, not as a security feature.
    probably_public_bits = [
        username,
        modname,
        getattr(app, "__name__", type(app).__name__),
        getattr(mod, "__file__", None),
    ]

    # This information is here to make it harder for an attacker to
    # guess the cookie name. They are unlikely to be contained anywhere
    # within the unauthenticated debug page.
    private_bits = [str(uuid.getnode()), get_machine_id()]

    # Fold all available identity bits into one hash; falsy bits are
    # skipped so missing sources don't change the scheme.
    h = hashlib.sha1()
    for bit in chain(probably_public_bits, private_bits):
        if not bit:
            continue
        if isinstance(bit, str):
            bit = bit.encode()
        h.update(bit)
    h.update(b"cookiesalt")

    cookie_name = f"__wzd{h.hexdigest()[:20]}"

    # If we need to generate a pin we salt it a bit more so that we don't
    # end up with the same value and generate out 9 digits
    if num is None:
        h.update(b"pinsalt")
        num = f"{int(h.hexdigest(), 16):09d}"[:9]

    # Format the pincode in groups of digits for easier remembering if
    # we don't have a result yet.
    if rv is None:
        for group_size in 5, 4, 3:
            if len(num) % group_size == 0:
                rv = "-".join(
                    num[x : x + group_size].rjust(group_size, "0")
                    for x in range(0, len(num), group_size)
                )
                break
        else:
            # No group size divides evenly; use the digits as-is.
            rv = num

    return rv, cookie_name
+
+
+class DebuggedApplication:
+ """Enables debugging support for a given application::
+
+ from werkzeug.debug import DebuggedApplication
+ from myapp import app
+ app = DebuggedApplication(app, evalex=True)
+
+ The ``evalex`` argument allows evaluating expressions in any frame
+ of a traceback. This works by preserving each frame with its local
+ state. Some state, such as context globals, cannot be restored with
+ the frame by default. When ``evalex`` is enabled,
+ ``environ["werkzeug.debug.preserve_context"]`` will be a callable
+ that takes a context manager, and can be called multiple times.
+ Each context manager will be entered before evaluating code in the
+ frame, then exited again, so they can perform setup and cleanup for
+ each call.
+
+ :param app: the WSGI application to run debugged.
+ :param evalex: enable exception evaluation feature (interactive
+ debugging). This requires a non-forking server.
+ :param request_key: The key that points to the request object in this
+ environment. This parameter is ignored in current
+ versions.
+ :param console_path: the URL for a general purpose console.
+ :param console_init_func: the function that is executed before starting
+ the general purpose console. The return value
+ is used as initial namespace.
+ :param show_hidden_frames: by default hidden traceback frames are skipped.
+ You can show them by setting this parameter
+ to `True`.
+ :param pin_security: can be used to disable the pin based security system.
+ :param pin_logging: enables the logging of the pin system.
+
+ .. versionchanged:: 2.2
+ Added the ``werkzeug.debug.preserve_context`` environ key.
+ """
+
+ _pin: str
+ _pin_cookie: str
+
    def __init__(
        self,
        app: WSGIApplication,
        evalex: bool = False,
        request_key: str = "werkzeug.request",
        console_path: str = "/console",
        console_init_func: t.Callable[[], dict[str, t.Any]] | None = None,
        show_hidden_frames: bool = False,
        pin_security: bool = True,
        pin_logging: bool = True,
    ) -> None:
        """Wrap ``app`` with the interactive debugger middleware. See the
        class docstring for parameter descriptions."""
        # Normalize any falsy console_init_func to None.
        if not console_init_func:
            console_init_func = None
        self.app = app
        self.evalex = evalex
        # Preserved traceback frames (and the console frame), keyed by id.
        self.frames: dict[int, DebugFrameSummary | _ConsoleFrame] = {}
        # Per-frame context managers registered via
        # environ["werkzeug.debug.preserve_context"].
        self.frame_contexts: dict[int, list[t.ContextManager[None]]] = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        # Random per-process secret used for frame authorization.
        self.secret = gen_salt(20)
        # Shared unsigned-byte counter of failed PIN attempts (shared so
        # it survives forking servers).
        self._failed_pin_auth = Value("B")

        self.pin_logging = pin_logging
        if pin_security:
            # Print out the pin for the debugger on standard out.
            if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
                _log("warning", " * Debugger is active!")
                if self.pin is None:
                    _log("warning", " * Debugger PIN disabled. DEBUGGER UNSECURED!")
                else:
                    _log("info", " * Debugger PIN: %s", self.pin)
        else:
            # Disabling pin security clears the pin entirely.
            self.pin = None

        self.trusted_hosts: list[str] = [".localhost", "127.0.0.1"]
        """List of domains to allow requests to the debugger from. A leading dot
        allows all subdomains. This only allows ``".localhost"`` domains by
        default.

        .. versionadded:: 3.0.3
        """
+
+ @property
+ def pin(self) -> str | None:
+ if not hasattr(self, "_pin"):
+ pin_cookie = get_pin_and_cookie_name(self.app)
+ self._pin, self._pin_cookie = pin_cookie # type: ignore
+ return self._pin
+
+ @pin.setter
+ def pin(self, value: str) -> None:
+ self._pin = value
+
+ @property
+ def pin_cookie_name(self) -> str:
+ """The name of the pin cookie."""
+ if not hasattr(self, "_pin_cookie"):
+ pin_cookie = get_pin_and_cookie_name(self.app)
+ self._pin, self._pin_cookie = pin_cookie # type: ignore
+ return self._pin_cookie
+
+ def debug_application(
+ self, environ: WSGIEnvironment, start_response: StartResponse
+ ) -> t.Iterator[bytes]:
+ """Run the application and conserve the traceback frames."""
+ contexts: list[t.ContextManager[t.Any]] = []
+
+ if self.evalex:
+ environ["werkzeug.debug.preserve_context"] = contexts.append
+
+ app_iter = None
+ try:
+ app_iter = self.app(environ, start_response)
+ yield from app_iter
+ if hasattr(app_iter, "close"):
+ app_iter.close()
+ except Exception as e:
+ if hasattr(app_iter, "close"):
+ app_iter.close() # type: ignore
+
+ tb = DebugTraceback(e, skip=1, hide=not self.show_hidden_frames)
+
+ for frame in tb.all_frames:
+ self.frames[id(frame)] = frame
+ self.frame_contexts[id(frame)] = contexts
+
+ is_trusted = bool(self.check_pin_trust(environ))
+ html = tb.render_debugger_html(
+ evalex=self.evalex and self.check_host_trust(environ),
+ secret=self.secret,
+ evalex_trusted=is_trusted,
+ )
+ response = Response(html, status=500, mimetype="text/html")
+
+ try:
+ yield from response(environ, start_response)
+ except Exception:
+ # if we end up here there has been output but an error
+ # occurred. in that situation we can do nothing fancy any
+ # more, better log something into the error log and fall
+ # back gracefully.
+ environ["wsgi.errors"].write(
+ "Debugging middleware caught exception in streamed "
+ "response at a point where response headers were already "
+ "sent.\n"
+ )
+
+ environ["wsgi.errors"].write("".join(tb.render_traceback_text()))
+
+ def execute_command(
+ self,
+ request: Request,
+ command: str,
+ frame: DebugFrameSummary | _ConsoleFrame,
+ ) -> Response:
+ """Execute a command in a console."""
+ if not self.check_host_trust(request.environ):
+ return SecurityError() # type: ignore[return-value]
+
+ contexts = self.frame_contexts.get(id(frame), [])
+
+ with ExitStack() as exit_stack:
+ for cm in contexts:
+ exit_stack.enter_context(cm)
+
+ return Response(frame.eval(command), mimetype="text/html")
+
+ def display_console(self, request: Request) -> Response:
+ """Display a standalone shell."""
+ if not self.check_host_trust(request.environ):
+ return SecurityError() # type: ignore[return-value]
+
+ if 0 not in self.frames:
+ if self.console_init_func is None:
+ ns = {}
+ else:
+ ns = dict(self.console_init_func())
+ ns.setdefault("app", self.app)
+ self.frames[0] = _ConsoleFrame(ns)
+ is_trusted = bool(self.check_pin_trust(request.environ))
+ return Response(
+ render_console_html(secret=self.secret, evalex_trusted=is_trusted),
+ mimetype="text/html",
+ )
+
+ def get_resource(self, request: Request, filename: str) -> Response:
+ """Return a static resource from the shared folder."""
+ path = join("shared", basename(filename))
+
+ try:
+ data = pkgutil.get_data(__package__, path)
+ except OSError:
+ return NotFound() # type: ignore[return-value]
+ else:
+ if data is None:
+ return NotFound() # type: ignore[return-value]
+
+ etag = str(adler32(data) & 0xFFFFFFFF)
+ return send_file(
+ BytesIO(data), request.environ, download_name=filename, etag=etag
+ )
+
+ def check_pin_trust(self, environ: WSGIEnvironment) -> bool | None:
+ """Checks if the request passed the pin test. This returns `True` if the
+ request is trusted on a pin/cookie basis and returns `False` if not.
+ Additionally if the cookie's stored pin hash is wrong it will return
+ `None` so that appropriate action can be taken.
+ """
+ if self.pin is None:
+ return True
+ val = parse_cookie(environ).get(self.pin_cookie_name)
+ if not val or "|" not in val:
+ return False
+ ts_str, pin_hash = val.split("|", 1)
+
+ try:
+ ts = int(ts_str)
+ except ValueError:
+ return False
+
+ if pin_hash != hash_pin(self.pin):
+ return None
+ return (time.time() - PIN_TIME) < ts
+
+ def check_host_trust(self, environ: WSGIEnvironment) -> bool:
+ return host_is_trusted(environ.get("HTTP_HOST"), self.trusted_hosts)
+
+ def _fail_pin_auth(self) -> None:
+ with self._failed_pin_auth.get_lock():
+ count = self._failed_pin_auth.value
+ self._failed_pin_auth.value = count + 1
+
+ time.sleep(5.0 if count > 5 else 0.5)
+
+ def pin_auth(self, request: Request) -> Response:
+ """Authenticates with the pin."""
+ if not self.check_host_trust(request.environ):
+ return SecurityError() # type: ignore[return-value]
+
+ exhausted = False
+ auth = False
+ trust = self.check_pin_trust(request.environ)
+ pin = t.cast(str, self.pin)
+
+ # If the trust return value is `None` it means that the cookie is
+ # set but the stored pin hash value is bad. This means that the
+ # pin was changed. In this case we count a bad auth and unset the
+ # cookie. This way it becomes harder to guess the cookie name
+ # instead of the pin as we still count up failures.
+ bad_cookie = False
+ if trust is None:
+ self._fail_pin_auth()
+ bad_cookie = True
+
+ # If we're trusted, we're authenticated.
+ elif trust:
+ auth = True
+
+ # If we failed too many times, then we're locked out.
+ elif self._failed_pin_auth.value > 10:
+ exhausted = True
+
+ # Otherwise go through pin based authentication
+ else:
+ entered_pin = request.args["pin"]
+
+ if entered_pin.strip().replace("-", "") == pin.replace("-", ""):
+ self._failed_pin_auth.value = 0
+ auth = True
+ else:
+ self._fail_pin_auth()
+
+ rv = Response(
+ json.dumps({"auth": auth, "exhausted": exhausted}),
+ mimetype="application/json",
+ )
+ if auth:
+ rv.set_cookie(
+ self.pin_cookie_name,
+ f"{int(time.time())}|{hash_pin(pin)}",
+ httponly=True,
+ samesite="Strict",
+ secure=request.is_secure,
+ )
+ elif bad_cookie:
+ rv.delete_cookie(self.pin_cookie_name)
+ return rv
+
+ def log_pin_request(self, request: Request) -> Response:
+ """Log the pin if needed."""
+ if not self.check_host_trust(request.environ):
+ return SecurityError() # type: ignore[return-value]
+
+ if self.pin_logging and self.pin is not None:
+ _log(
+ "info", " * To enable the debugger you need to enter the security pin:"
+ )
+ _log("info", " * Debugger pin code: %s", self.pin)
+ return Response("")
+
+ def __call__(
+ self, environ: WSGIEnvironment, start_response: StartResponse
+ ) -> t.Iterable[bytes]:
+ """Dispatch the requests."""
+ # important: don't ever access a function here that reads the incoming
+ # form data! Otherwise the application won't have access to that data
+ # any more!
+ request = Request(environ)
+ response = self.debug_application
+ if request.args.get("__debugger__") == "yes":
+ cmd = request.args.get("cmd")
+ arg = request.args.get("f")
+ secret = request.args.get("s")
+ frame = self.frames.get(request.args.get("frm", type=int)) # type: ignore
+ if cmd == "resource" and arg:
+ response = self.get_resource(request, arg) # type: ignore
+ elif cmd == "pinauth" and secret == self.secret:
+ response = self.pin_auth(request) # type: ignore
+ elif cmd == "printpin" and secret == self.secret:
+ response = self.log_pin_request(request) # type: ignore
+ elif (
+ self.evalex
+ and cmd is not None
+ and frame is not None
+ and self.secret == secret
+ and self.check_pin_trust(environ)
+ ):
+ response = self.execute_command(request, cmd, frame) # type: ignore
+ elif (
+ self.evalex
+ and self.console_path is not None
+ and request.path == self.console_path
+ ):
+ response = self.display_console(request) # type: ignore
+ return response(environ, start_response)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/debug/console.py b/venv/lib/python3.8/site-packages/werkzeug/debug/console.py
new file mode 100644
index 0000000..4e40475
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/debug/console.py
@@ -0,0 +1,219 @@
+from __future__ import annotations
+
+import code
+import sys
+import typing as t
+from contextvars import ContextVar
+from types import CodeType
+
+from markupsafe import escape
+
+from .repr import debug_repr
+from .repr import dump
+from .repr import helper
+
+_stream: ContextVar[HTMLStringO] = ContextVar("werkzeug.debug.console.stream")
+_ipy: ContextVar[_InteractiveConsole] = ContextVar("werkzeug.debug.console.ipy")
+
+
class HTMLStringO:
    """A write-only ``StringIO``-like buffer that HTML-escapes on ``write``.

    ``_write`` appends raw (pre-rendered HTML) text; ``write`` and
    ``writelines`` escape their input first. The file-object methods
    (``isatty``, ``close``, ``flush``, ``seek``) exist only so the object
    can stand in for ``sys.stdout``.
    """

    def __init__(self) -> None:
        self._buffer: list[str] = []

    def isatty(self) -> bool:
        # Never a terminal.
        return False

    def close(self) -> None:
        # Nothing to release.
        pass

    def flush(self) -> None:
        pass

    def seek(self, n: int, mode: int = 0) -> None:
        pass

    def readline(self) -> str:
        """Pop and return the oldest buffered chunk, or ``""`` when empty."""
        if not self._buffer:
            return ""
        return self._buffer.pop(0)

    def reset(self) -> str:
        """Return everything buffered so far and clear the buffer."""
        joined = "".join(self._buffer)
        self._buffer.clear()
        return joined

    def _write(self, x: str) -> None:
        # Raw append, bypassing HTML escaping.
        self._buffer.append(x)

    def write(self, x: str) -> None:
        self._write(escape(x))

    def writelines(self, x: t.Iterable[str]) -> None:
        self._write(escape("".join(x)))
+
+
class ThreadedStream:
    """Thread-local wrapper for sys.stdout for the interactive console."""

    @staticmethod
    def push() -> None:
        """Install the wrapper as ``sys.stdout`` (once per process) and
        start a fresh per-context output buffer."""
        if not isinstance(sys.stdout, ThreadedStream):
            sys.stdout = t.cast(t.TextIO, ThreadedStream())

        _stream.set(HTMLStringO())

    @staticmethod
    def fetch() -> str:
        """Return and clear the buffered output, or ``""`` when no console
        buffer is active in this context."""
        try:
            stream = _stream.get()
        except LookupError:
            return ""

        return stream.reset()

    @staticmethod
    def displayhook(obj: object) -> None:
        """``sys.displayhook`` replacement that renders reprs as HTML."""
        try:
            stream = _stream.get()
        except LookupError:
            # Not inside a console evaluation: defer to the original hook.
            return _displayhook(obj)  # type: ignore

        # stream._write bypasses escaping as debug_repr is
        # already generating HTML for us.
        if obj is not None:
            # Mirror the interactive interpreter convention: ``_`` holds
            # the last displayed value.
            _ipy.get().locals["_"] = obj
            stream._write(debug_repr(obj))

    def __setattr__(self, name: str, value: t.Any) -> None:
        # The proxy delegates everything; setting attributes on it directly
        # is almost certainly a mistake.
        raise AttributeError(f"read only attribute {name}")

    def __dir__(self) -> list[str]:
        return dir(sys.__stdout__)

    def __getattribute__(self, name: str) -> t.Any:
        # Delegate every attribute access to the context's HTML buffer, or
        # to the real stdout when no console buffer is active.
        try:
            stream = _stream.get()
        except LookupError:
            stream = sys.__stdout__  # type: ignore[assignment]

        return getattr(stream, name)

    def __repr__(self) -> str:
        return repr(sys.__stdout__)
+
+
# add the threaded stream as display hook
# Keep a reference to the original hook so ThreadedStream.displayhook can
# fall back to it outside console evaluations.
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
+
+
+class _ConsoleLoader:
+ def __init__(self) -> None:
+ self._storage: dict[int, str] = {}
+
+ def register(self, code: CodeType, source: str) -> None:
+ self._storage[id(code)] = source
+ # register code objects of wrapped functions too.
+ for var in code.co_consts:
+ if isinstance(var, CodeType):
+ self._storage[id(var)] = source
+
+ def get_source_by_code(self, code: CodeType) -> str | None:
+ try:
+ return self._storage[id(code)]
+ except KeyError:
+ return None
+
+
class _InteractiveConsole(code.InteractiveInterpreter):
    """Interpreter backing the in-browser console.

    Output is captured through :class:`ThreadedStream` and each evaluation
    is returned as an HTML string.
    """

    locals: dict[str, t.Any]

    def __init__(self, globals: dict[str, t.Any], locals: dict[str, t.Any]) -> None:
        # Loader exposed as ``__loader__`` so tracebacks can resolve the
        # source of code typed into the console.
        self.loader = _ConsoleLoader()
        # Merge globals into locals and add the console helpers; the base
        # interpreter only takes a single namespace.
        locals = {
            **globals,
            **locals,
            "dump": dump,
            "help": helper,
            "__loader__": self.loader,
        }
        super().__init__(locals)
        original_compile = self.compile

        # Wrap the inherited compiler so every successfully compiled
        # snippet is registered with the loader before it runs.
        def compile(source: str, filename: str, symbol: str) -> CodeType | None:
            code = original_compile(source, filename, symbol)

            if code is not None:
                self.loader.register(code, source)

            return code

        self.compile = compile  # type: ignore[assignment]
        # True while a multi-line statement is still being entered.
        self.more = False
        # Pending source lines of the current multi-line statement.
        self.buffer: list[str] = []

    def runsource(self, source: str, **kwargs: t.Any) -> str:  # type: ignore
        """Run one line of input and return the HTML-rendered transcript.

        Note: unlike the base class, this returns a string (prompt + echoed
        source + captured output) rather than a bool.
        """
        source = f"{source.rstrip()}\n"
        ThreadedStream.push()
        prompt = "... " if self.more else ">>> "
        try:
            # Re-run the whole pending buffer plus the new line so
            # multi-line statements compile as a unit.
            source_to_eval = "".join(self.buffer + [source])
            if super().runsource(source_to_eval, "<debugger>", "single"):
                # Input is incomplete; keep accumulating lines.
                self.more = True
                self.buffer.append(source)
            else:
                self.more = False
                del self.buffer[:]
        finally:
            output = ThreadedStream.fetch()
        return f"{prompt}{escape(source)}{output}"

    def runcode(self, code: CodeType) -> None:
        try:
            exec(code, self.locals)
        except Exception:
            self.showtraceback()

    def showtraceback(self) -> None:
        from .tbtools import DebugTraceback

        exc = t.cast(BaseException, sys.exc_info()[1])
        te = DebugTraceback(exc, skip=1)
        # _write: the traceback is already rendered HTML, skip escaping.
        sys.stdout._write(te.render_traceback_html())  # type: ignore

    def showsyntaxerror(self, filename: str | None = None) -> None:
        from .tbtools import DebugTraceback

        exc = t.cast(BaseException, sys.exc_info()[1])
        # skip=4 hides the interpreter's own compile/run frames.
        te = DebugTraceback(exc, skip=4)
        sys.stdout._write(te.render_traceback_html())  # type: ignore

    def write(self, data: str) -> None:
        sys.stdout.write(data)
+
+
class Console:
    """An interactive console bound to a namespace.

    :meth:`eval` runs one line of source and returns the HTML-rendered
    transcript for the debugger UI.
    """

    def __init__(
        self,
        globals: dict[str, t.Any] | None = None,
        locals: dict[str, t.Any] | None = None,
    ) -> None:
        # Missing namespaces default to fresh empty dicts.
        if globals is None:
            globals = {}
        if locals is None:
            locals = {}
        self._ipy = _InteractiveConsole(globals, locals)

    def eval(self, code: str) -> str:
        """Evaluate *code* in this console and return the HTML output."""
        _ipy.set(self._ipy)
        saved_stdout = sys.stdout
        try:
            return self._ipy.runsource(code)
        finally:
            # runsource may have swapped in a ThreadedStream; restore stdout.
            sys.stdout = saved_stdout
diff --git a/venv/lib/python3.8/site-packages/werkzeug/debug/repr.py b/venv/lib/python3.8/site-packages/werkzeug/debug/repr.py
new file mode 100644
index 0000000..2bbd9d5
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/debug/repr.py
@@ -0,0 +1,282 @@
+"""Object representations for debugging purposes. Unlike the default
+repr, these expose more information and produce HTML instead of ASCII.
+
+Together with the CSS and JavaScript of the debugger this gives a
+colorful and more compact output.
+"""
+
+from __future__ import annotations
+
+import codecs
+import re
+import sys
+import typing as t
+from collections import deque
+from traceback import format_exception_only
+
+from markupsafe import escape
+
+missing = object()
+_paragraph_re = re.compile(r"(?:\r\n|\r|\n){2,}")
+RegexType = type(_paragraph_re)
+
+HELP_HTML = """\
+<div class=box>
+ <h3>%(title)s</h3>
+ <pre class=help>%(text)s</pre>
+</div>\
+"""
+OBJECT_DUMP_HTML = """\
+<div class=box>
+ <h3>%(title)s</h3>
+ %(repr)s
+ <table>%(items)s</table>
+</div>\
+"""
+
+
def debug_repr(obj: object) -> str:
    """Creates a debug repr of an object as HTML string."""
    generator = DebugReprGenerator()
    return generator.repr(obj)
+
+
def dump(obj: object = missing) -> None:
    """Print the object details to stdout._write (for the interactive
    console of the web debugger).

    With no argument, dumps the local variables of the calling frame
    instead.
    """
    gen = DebugReprGenerator()
    if obj is missing:
        # No argument given: inspect the caller's frame locals.
        rv = gen.dump_locals(sys._getframe(1).f_locals)
    else:
        rv = gen.dump_object(obj)
    # _write: the dump is already rendered HTML, skip escaping.
    sys.stdout._write(rv)  # type: ignore
+
+
+class _Helper:
+ """Displays an HTML version of the normal help, for the interactive
+ debugger only because it requires a patched sys.stdout.
+ """
+
+ def __repr__(self) -> str:
+ return "Type help(object) for help about object."
+
+ def __call__(self, topic: t.Any | None = None) -> None:
+ if topic is None:
+ sys.stdout._write(f"<span class=help>{self!r}</span>") # type: ignore
+ return
+ import pydoc
+
+ pydoc.help(topic)
+ rv = sys.stdout.reset() # type: ignore
+ paragraphs = _paragraph_re.split(rv)
+ if len(paragraphs) > 1:
+ title = paragraphs[0]
+ text = "\n\n".join(paragraphs[1:])
+ else:
+ title = "Help"
+ text = paragraphs[0]
+ sys.stdout._write(HELP_HTML % {"title": title, "text": text}) # type: ignore
+
+
+helper = _Helper()
+
+
+def _add_subclass_info(inner: str, obj: object, base: type | tuple[type, ...]) -> str:
+ if isinstance(base, tuple):
+ for cls in base:
+ if type(obj) is cls:
+ return inner
+ elif type(obj) is base:
+ return inner
+ module = ""
+ if obj.__class__.__module__ not in ("__builtin__", "exceptions"):
+ module = f'<span class="module">{obj.__class__.__module__}.</span>'
+ return f"{module}{type(obj).__name__}({inner})"
+
+
def _sequence_repr_maker(
    left: str, right: str, base: type, limit: int = 8
) -> t.Callable[[DebugReprGenerator, t.Iterable[t.Any], bool], str]:
    """Build a repr method for a sequence type delimited by *left*/*right*.

    Items beyond *limit* are wrapped in an ``extended`` span that the
    debugger UI can collapse. Recursive occurrences render as an ellipsis.
    """

    def proxy(self: DebugReprGenerator, obj: t.Iterable[t.Any], recursive: bool) -> str:
        if recursive:
            # Self-referential container: elide the contents entirely.
            return _add_subclass_info(f"{left}...{right}", obj, base)
        pieces = [left]
        extended_open = False
        for index, element in enumerate(obj):
            if index:
                pieces.append(", ")
            if index == limit:
                pieces.append('<span class="extended">')
                extended_open = True
            pieces.append(self.repr(element))
        if extended_open:
            pieces.append("</span>")
        pieces.append(right)
        return _add_subclass_info("".join(pieces), obj, base)

    return proxy
+
+
class DebugReprGenerator:
    """Generates HTML debug reprs, tracking nesting to detect recursion."""

    def __init__(self) -> None:
        # Stack of objects currently being repr'd; used by ``repr`` to
        # detect self-referential containers.
        self._stack: list[t.Any] = []

    # Repr implementations for the builtin sequence types.
    list_repr = _sequence_repr_maker("[", "]", list)
    tuple_repr = _sequence_repr_maker("(", ")", tuple)
    set_repr = _sequence_repr_maker("set([", "])", set)
    frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
    deque_repr = _sequence_repr_maker(
        '<span class="module">collections.</span>deque([', "])", deque
    )

    def regex_repr(self, obj: t.Pattern[t.AnyStr]) -> str:
        """Render a compiled regex as ``re.compile(r'...')``."""
        pattern = repr(obj.pattern)
        # Undo repr's escaping so the pattern reads as a raw string literal.
        pattern = codecs.decode(pattern, "unicode-escape", "ignore")
        pattern = f"r{pattern}"
        return f're.compile(<span class="string regex">{pattern}</span>)'

    def string_repr(self, obj: str | bytes, limit: int = 70) -> str:
        """Render a string/bytes repr, collapsing anything past *limit*."""
        buf = ['<span class="string">']
        r = repr(obj)

        # shorten the repr when the hidden part would be at least 3 chars
        if len(r) - limit > 2:
            buf.extend(
                (
                    escape(r[:limit]),
                    '<span class="extended">',
                    escape(r[limit:]),
                    "</span>",
                )
            )
        else:
            buf.append(escape(r))

        buf.append("</span>")
        out = "".join(buf)

        # if the repr looks like a standard string, add subclass info if needed
        if r[0] in "'\"" or (r[0] == "b" and r[1] in "'\""):
            return _add_subclass_info(out, obj, (bytes, str))

        # otherwise, assume the repr distinguishes the subclass already
        return out

    def dict_repr(
        self,
        d: dict[int, None] | dict[str, int] | dict[str | int, int],
        recursive: bool,
        limit: int = 5,
    ) -> str:
        """Render a dict repr, collapsing pairs past *limit* entries."""
        if recursive:
            return _add_subclass_info("{...}", d, dict)
        buf = ["{"]
        have_extended_section = False
        for idx, (key, value) in enumerate(d.items()):
            if idx:
                buf.append(", ")
            # NOTE: the extended span opens before the limit-th pair, one
            # entry earlier than the sequence reprs do.
            if idx == limit - 1:
                buf.append('<span class="extended">')
                have_extended_section = True
            buf.append(
                f'<span class="pair"><span class="key">{self.repr(key)}</span>:'
                f' <span class="value">{self.repr(value)}</span></span>'
            )
        if have_extended_section:
            buf.append("</span>")
        buf.append("}")
        return _add_subclass_info("".join(buf), d, dict)

    def object_repr(self, obj: t.Any) -> str:
        """Fallback: plain escaped repr for any other object."""
        r = repr(obj)
        return f'<span class="object">{escape(r)}</span>'

    def dispatch_repr(self, obj: t.Any, recursive: bool) -> str:
        """Pick the type-specific repr method for *obj*."""
        if obj is helper:
            return f'<span class="help">{helper!r}</span>'
        if isinstance(obj, (int, float, complex)):
            return f'<span class="number">{obj!r}</span>'
        if isinstance(obj, str) or isinstance(obj, bytes):
            return self.string_repr(obj)
        if isinstance(obj, RegexType):
            return self.regex_repr(obj)
        if isinstance(obj, list):
            return self.list_repr(obj, recursive)
        if isinstance(obj, tuple):
            return self.tuple_repr(obj, recursive)
        if isinstance(obj, set):
            return self.set_repr(obj, recursive)
        if isinstance(obj, frozenset):
            return self.frozenset_repr(obj, recursive)
        if isinstance(obj, dict):
            return self.dict_repr(obj, recursive)
        if isinstance(obj, deque):
            return self.deque_repr(obj, recursive)
        return self.object_repr(obj)

    def fallback_repr(self) -> str:
        """Repr used when the object's own repr raised an exception."""
        try:
            info = "".join(format_exception_only(*sys.exc_info()[:2]))
        except Exception:
            info = "?"
        return (
            '<span class="brokenrepr">'
            f"&lt;broken repr ({escape(info.strip())})&gt;</span>"
        )

    def repr(self, obj: object) -> str:
        """Render *obj* as HTML, flagging recursion via the object stack."""
        recursive = False
        # Identity check (not ==) so equal-but-distinct objects don't
        # trigger the recursion path.
        for item in self._stack:
            if item is obj:
                recursive = True
                break
        self._stack.append(obj)
        try:
            try:
                return self.dispatch_repr(obj, recursive)
            except Exception:
                return self.fallback_repr()
        finally:
            self._stack.pop()

    def dump_object(self, obj: object) -> str:
        """Render a full dump of *obj*: dict contents or attribute list."""
        repr = None
        items: list[tuple[str, str]] | None = None

        if isinstance(obj, dict):
            title = "Contents of"
            items = []
            for key, value in obj.items():
                if not isinstance(key, str):
                    # Non-string keys can't be table headers; fall back to
                    # the attribute dump below.
                    items = None
                    break
                items.append((key, self.repr(value)))
        if items is None:
            items = []
            repr = self.repr(obj)
            for key in dir(obj):
                try:
                    items.append((key, self.repr(getattr(obj, key))))
                except Exception:
                    # Skip attributes whose access raises.
                    pass
            title = "Details for"
        title += f" {object.__repr__(obj)[1:-1]}"
        return self.render_object_dump(items, title, repr)

    def dump_locals(self, d: dict[str, t.Any]) -> str:
        """Render a dump of a frame's local variables."""
        items = [(key, self.repr(value)) for key, value in d.items()]
        return self.render_object_dump(items, "Local variables in frame")

    def render_object_dump(
        self, items: list[tuple[str, str]], title: str, repr: str | None = None
    ) -> str:
        """Fill the OBJECT_DUMP_HTML template with *items* (values are
        already-rendered HTML; keys are escaped here)."""
        html_items = []
        for key, value in items:
            html_items.append(f"<tr><th>{escape(key)}<td><pre class=repr>{value}</pre>")
        if not html_items:
            html_items.append("<tr><td><em>Nothing</em>")
        return OBJECT_DUMP_HTML % {
            "title": escape(title),
            "repr": f"<pre class=repr>{repr if repr else ''}</pre>",
            "items": "\n".join(html_items),
        }
diff --git a/venv/lib/python3.8/site-packages/werkzeug/debug/shared/ICON_LICENSE.md b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/ICON_LICENSE.md
new file mode 100644
index 0000000..3bdbfc7
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/ICON_LICENSE.md
@@ -0,0 +1,6 @@
+Silk icon set 1.3 by Mark James <mjames@gmail.com>
+
+http://www.famfamfam.com/lab/icons/silk/
+
+License: [CC-BY-2.5](https://creativecommons.org/licenses/by/2.5/)
+or [CC-BY-3.0](https://creativecommons.org/licenses/by/3.0/)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/debug/shared/console.png b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/console.png
new file mode 100644
index 0000000..c28dd63
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/console.png
Binary files differ
diff --git a/venv/lib/python3.8/site-packages/werkzeug/debug/shared/debugger.js b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/debugger.js
new file mode 100644
index 0000000..809b14a
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/debugger.js
@@ -0,0 +1,344 @@
// Entry point: wire up the debugger UI once the DOM is ready. EVALEX,
// EVALEX_TRUSTED, CONSOLE_MODE and SECRET are free globals here —
// presumably injected by the server-rendered debugger page; verify
// against the HTML template.
docReady(() => {
  if (!EVALEX_TRUSTED) {
    initPinBox();
  }
  // if we are in console mode, show the console.
  if (CONSOLE_MODE && EVALEX) {
    createInteractiveConsole();
  }

  const frames = document.querySelectorAll("div.traceback div.frame");
  if (EVALEX) {
    addConsoleIconToFrames(frames);
  }
  // Clicking the summary scrolls the full traceback into view.
  addEventListenersToElements(document.querySelectorAll("div.detail"), "click", () =>
    document.querySelector("div.traceback").scrollIntoView(false)
  );
  addToggleFrameTraceback(frames);
  addToggleTraceTypesOnClick(document.querySelectorAll("h2.traceback"));
  addInfoPrompt(document.querySelectorAll("span.nojavascript"));
  wrapPlainTraceback();
});
+
// Clicking a traceback frame expands/collapses its source listing.
function addToggleFrameTraceback(frames) {
  for (const frame of frames) {
    frame.addEventListener("click", () => {
      const sourcePre = frame.getElementsByTagName("pre")[0];
      sourcePre.parentElement.classList.toggle("expanded");
    });
  }
}
+
+
// Replace the plain-traceback <textarea> with a read-only <pre> holding
// the same text.
function wrapPlainTraceback() {
  const plainTraceback = document.querySelector("div.plain textarea");
  const pre = document.createElement("pre");
  pre.appendChild(document.createTextNode(plainTraceback.textContent));
  plainTraceback.replaceWith(pre);
}
+
// Build a debugger command URL from the given args, always signed with
// the page's SECRET.
function makeDebugURL(args) {
  const query = new URLSearchParams(args);
  query.set("s", SECRET);
  return `?__debugger__=yes&${query}`;
}
+
// Wire up the PIN prompt form: POSTs the entered PIN to the server and
// unlocks the console (fading the prompt out) on success.
function initPinBox() {
  document.querySelector(".pin-prompt form").addEventListener(
    "submit",
    function (event) {
      event.preventDefault();
      // Disable the button while the auth request is in flight.
      const btn = this.btn;
      btn.disabled = true;

      fetch(
        makeDebugURL({cmd: "pinauth", pin: this.pin.value})
      )
        .then((res) => res.json())
        .then(({auth, exhausted}) => {
          if (auth) {
            EVALEX_TRUSTED = true;
            fadeOut(document.getElementsByClassName("pin-prompt")[0]);
          } else {
            alert(
              `Error: ${
                exhausted
                  ? "too many attempts. Restart server to retry."
                  : "incorrect pin"
              }`
            );
          }
        })
        .catch((err) => {
          alert("Error: Could not verify PIN. Network error?");
          console.error(err);
        })
        .finally(() => (btn.disabled = false));
    },
    false
  );
}
+
// Ask the server to print the PIN to its own console, then fade the PIN
// prompt in and focus its input. No-op when already trusted.
function promptForPin() {
  if (!EVALEX_TRUSTED) {
    // Fire-and-forget; the PIN appears in the server log, not here.
    fetch(makeDebugURL({cmd: "printpin"}));
    const pinPrompt = document.getElementsByClassName("pin-prompt")[0];
    fadeIn(pinPrompt);
    document.querySelector('.pin-prompt input[name="pin"]').focus();
  }
}
+
/**
 * Helper function for shell initialization.
 *
 * Toggles an already-built console for the frame, or builds a new one
 * (output area, input form, command history) and wires its handlers.
 * Returns the console element so the caller can cache it per frame.
 */
function openShell(consoleNode, target, frameID) {
  promptForPin();
  if (consoleNode) {
    // Console already exists for this frame: just show/hide it.
    slideToggle(consoleNode);
    return consoleNode;
  }
  let historyPos = 0;
  const history = [""];
  const consoleElement = createConsole();
  const output = createConsoleOutput();
  const form = createConsoleInputForm();
  const command = createConsoleInput();

  target.parentNode.appendChild(consoleElement);
  consoleElement.append(output);
  consoleElement.append(form);
  form.append(command);
  command.focus();
  slideToggle(consoleElement);

  form.addEventListener("submit", (e) => {
    handleConsoleSubmit(e, command, frameID).then((consoleOutput) => {
      output.append(consoleOutput);
      command.focus();
      consoleElement.scrollTo(0, consoleElement.scrollHeight);
      // Record the command, keeping the trailing "" entry (the blank
      // "new command" slot) at the end of the history list.
      const old = history.pop();
      history.push(command.value);
      if (typeof old !== "undefined") {
        history.push(old);
      }
      historyPos = history.length - 1;
      command.value = "";
    });
  });

  command.addEventListener("keydown", (e) => {
    if (e.key === "l" && e.ctrlKey) {
      // Ctrl+L clears the screen, terminal-style.
      output.innerText = "--- screen cleared ---";
    } else if (e.key === "ArrowUp" || e.key === "ArrowDown") {
      // Handle up arrow and down arrow.
      if (e.key === "ArrowUp" && historyPos > 0) {
        e.preventDefault();
        historyPos--;
      } else if (e.key === "ArrowDown" && historyPos < history.length - 1) {
        historyPos++;
      }
      command.value = history[historyPos];
    }
    return false;
  });

  return consoleElement;
}
+
// Attach the same listener for one event to every element in a collection.
function addEventListenersToElements(elements, event, listener) {
  for (const element of elements) {
    element.addEventListener(event, listener);
  }
}
+
/**
 * Add extra info
 *
 * Replaces each no-JavaScript notice with usage instructions, including
 * the console helpers when EVALEX is enabled.
 */
function addInfoPrompt(elements) {
  for (let i = 0; i < elements.length; i++) {
    elements[i].innerHTML =
      "<p>To switch between the interactive traceback and the plaintext " +
      'one, you can click on the "Traceback" headline. From the text ' +
      "traceback you can also create a paste of it. " +
      (!EVALEX
        ? ""
        : "For code execution mouse-over the frame you want to debug and " +
          "click on the console icon on the right side." +
          "<p>You can execute arbitrary Python code in the stack frames and " +
          "there are some extra helpers available for introspection:" +
          "<ul><li><code>dump()</code> shows all variables in the frame" +
          "<li><code>dump(obj)</code> dumps all that's known about the object</ul>");
    elements[i].classList.remove("nojavascript");
  }
}
+
// Add a clickable console icon to every source <pre> of every frame.
// Clicking it opens (or toggles) the shell for that frame.
function addConsoleIconToFrames(frames) {
  for (let i = 0; i < frames.length; i++) {
    // Cached console element, shared by all icons within one frame.
    let consoleNode = null;
    const target = frames[i];
    // Frame element ids look like "frame-<id>"; strip the prefix.
    const frameID = frames[i].id.substring(6);

    for (let j = 0; j < target.getElementsByTagName("pre").length; j++) {
      const img = createIconForConsole();
      img.addEventListener("click", (e) => {
        // Don't trigger the frame's own expand/collapse click handler.
        e.stopPropagation();
        consoleNode = openShell(consoleNode, target, frameID);
        return false;
      });
      target.getElementsByTagName("pre")[j].append(img);
    }
  }
}
+
// Show/hide a console panel; visibility is driven by the "active" class.
function slideToggle(target) {
  target.classList.toggle("active");
}
+
/**
 * toggle traceback types on click.
 *
 * Clicking a traceback headline switches between the interactive and the
 * plain-text traceback views.
 */
function addToggleTraceTypesOnClick(elements) {
  for (let i = 0; i < elements.length; i++) {
    elements[i].addEventListener("click", () => {
      document.querySelector("div.traceback").classList.toggle("hidden");
      document.querySelector("div.plain").classList.toggle("hidden");
    });
    elements[i].style.cursor = "pointer";
    // NOTE(review): this initial toggle runs once per matched headline;
    // with more than one <h2.traceback> the plain view's starting state
    // would flip back and forth — presumably exactly one exists. Verify
    // against the debugger template.
    document.querySelector("div.plain").classList.toggle("hidden");
  }
}
+
// Build the (initially visible) <pre> container that hosts a console.
function createConsole() {
  const consoleNode = document.createElement("pre");
  consoleNode.classList.add("console");
  consoleNode.classList.add("active");
  return consoleNode;
}
+
// Build the scrollback area of a console, pre-filled with a ready banner.
function createConsoleOutput() {
  const output = document.createElement("div");
  output.classList.add("output");
  output.innerHTML = "[console ready]";
  return output;
}
+
// Build the input form of a console, showing a ">>> " prompt.
function createConsoleInputForm() {
  const form = document.createElement("form");
  form.innerHTML = "&gt;&gt;&gt; ";
  return form;
}
+
// Build the command <input>, with browser text-assist features disabled
// since users type Python code here.
function createConsoleInput() {
  const command = document.createElement("input");
  command.type = "text";
  command.setAttribute("autocomplete", "off");
  command.setAttribute("spellcheck", false);
  command.setAttribute("autocapitalize", "off");
  command.setAttribute("autocorrect", "off");
  return command;
}
+
// Build the console icon, served through the debugger's resource command.
function createIconForConsole() {
  const img = document.createElement("img");
  img.setAttribute("src", makeDebugURL({cmd: "resource", f: "console.png"}));
  img.setAttribute("title", "Open an interactive python shell in this frame");
  return img;
}
+
// Build the toggle link used to expand/collapse long console output.
function createExpansionButtonForConsole() {
  const expansionButton = document.createElement("a");
  expansionButton.setAttribute("href", "#");
  expansionButton.setAttribute("class", "toggle");
  expansionButton.innerHTML = "&nbsp;&nbsp;";
  return expansionButton;
}
+
// Empty the standalone-console container and open a shell bound to the
// general-purpose frame (id 0).
function createInteractiveConsole() {
  const target = document.querySelector("div.console div.inner");
  while (target.firstChild) {
    target.removeChild(target.firstChild);
  }
  openShell(null, target, 0);
}
+
// Send a console command to the server for the given frame and resolve
// with a DOM node containing the HTML-rendered result. Long outputs get
// their "extended" spans hidden behind an expansion toggle.
function handleConsoleSubmit(e, command, frameID) {
  // Prevent page from refreshing.
  e.preventDefault();

  return new Promise((resolve) => {
    fetch(makeDebugURL({cmd: command.value, frm: frameID}))
      .then((res) => {
        return res.text();
      })
      .then((data) => {
        const tmp = document.createElement("div");
        tmp.innerHTML = data;
        resolve(tmp);

        // Handle expandable span for long list outputs.
        // Example to test: list(range(13))
        let wrapperAdded = false;
        const wrapperSpan = document.createElement("span");
        const expansionButton = createExpansionButtonForConsole();

        tmp.querySelectorAll("span.extended").forEach((spanToWrap) => {
          const parentDiv = spanToWrap.parentNode;
          if (!wrapperAdded) {
            parentDiv.insertBefore(wrapperSpan, spanToWrap);
            wrapperAdded = true;
          }
          // Move the span into the shared wrapper and hide it by default.
          parentDiv.removeChild(spanToWrap);
          wrapperSpan.append(spanToWrap);
          spanToWrap.hidden = true;

          expansionButton.addEventListener("click", (event) => {
            event.preventDefault();
            spanToWrap.hidden = !spanToWrap.hidden;
            expansionButton.classList.toggle("open");
            return false;
          });
        });

        // Add expansion button at end of wrapper.
        if (wrapperAdded) {
          wrapperSpan.append(expansionButton);
        }
      })
      .catch((err) => {
        console.error(err);
      });
    return false;
  });
}
+
// Animate an element to transparent, then remove it from layout.
// style.opacity is a string; "-=" coerces it to a number each frame.
function fadeOut(element) {
  element.style.opacity = 1;

  (function fade() {
    element.style.opacity -= 0.1;
    if (element.style.opacity < 0) {
      element.style.display = "none";
    } else {
      requestAnimationFrame(fade);
    }
  })();
}
+
// Make an element visible and animate it from transparent to opaque.
// `display` defaults to "block" when not given.
function fadeIn(element, display) {
  element.style.opacity = 0;
  element.style.display = display || "block";

  (function fade() {
    let val = parseFloat(element.style.opacity) + 0.1;
    if (val <= 1) {
      element.style.opacity = val;
      requestAnimationFrame(fade);
    }
  })();
}
+
// Run `fn` once the DOM is usable: immediately (async, via setTimeout) if
// the document has already loaded, otherwise on DOMContentLoaded.
function docReady(fn) {
  if (document.readyState === "complete" || document.readyState === "interactive") {
    setTimeout(fn, 1);
  } else {
    document.addEventListener("DOMContentLoaded", fn);
  }
}
diff --git a/venv/lib/python3.8/site-packages/werkzeug/debug/shared/less.png b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/less.png
new file mode 100644
index 0000000..5efefd6
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/less.png
Binary files differ
diff --git a/venv/lib/python3.8/site-packages/werkzeug/debug/shared/more.png b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/more.png
new file mode 100644
index 0000000..804fa22
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/more.png
Binary files differ
diff --git a/venv/lib/python3.8/site-packages/werkzeug/debug/shared/style.css b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/style.css
new file mode 100644
index 0000000..e9397ca
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/debug/shared/style.css
@@ -0,0 +1,150 @@
/* Stylesheet for the Werkzeug debugger pages (traceback view and the
   interactive console). Image resources below are served through the
   same ?__debugger__ resource endpoint as this file. */

body, input { font-family: sans-serif; color: #000; text-align: center;
    margin: 1em; padding: 0; font-size: 15px; }
h1, h2, h3 { font-weight: normal; }

input { background-color: #fff; margin: 0; text-align: left;
    outline: none !important; }
input[type="submit"] { padding: 3px 6px; }
a { color: #11557C; }
a:hover { color: #177199; }
pre, code,
textarea { font-family: monospace; font-size: 14px; }

div.debugger { text-align: left; padding: 12px; margin: auto;
    background-color: white; }
h1 { font-size: 36px; margin: 0 0 0.3em 0; }
div.detail { cursor: pointer; }
div.detail p { margin: 0 0 8px 13px; font-size: 14px; white-space: pre-wrap;
    font-family: monospace; }
div.explanation { margin: 20px 13px; font-size: 15px; color: #555; }
div.footer { font-size: 13px; text-align: right; margin: 30px 0;
    color: #86989B; }

h2 { font-size: 16px; margin: 1.3em 0 0.0 0; padding: 9px;
    background-color: #11557C; color: white; }
h2 em, h3 em { font-style: normal; color: #A5D6D9; font-weight: normal; }

/* Traceback rendering: frames are <pre> lines; "before"/"after" context
   lines and whitespace spans are revealed when a frame is expanded. */
div.traceback, div.plain { border: 1px solid #ddd; margin: 0 0 1em 0; padding: 10px; }
div.plain p { margin: 0; }
div.plain textarea,
div.plain pre { margin: 10px 0 0 0; padding: 4px;
    background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.plain textarea { width: 99%; height: 300px; }
div.traceback h3 { font-size: 1em; margin: 0 0 0.8em 0; }
div.traceback ul { list-style: none; margin: 0; padding: 0 0 0 1em; }
div.traceback h4 { font-size: 13px; font-weight: normal; margin: 0.7em 0 0.1em 0; }
div.traceback pre { margin: 0; padding: 5px 0 3px 15px;
    background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.traceback .library .current { background: white; color: #555; }
div.traceback .expanded .current { background: #E8EFF0; color: black; }
div.traceback pre:hover { background-color: #DDECEE; color: black; cursor: pointer; }
div.traceback div.source.expanded pre + pre { border-top: none; }

div.traceback span.ws { display: none; }
div.traceback pre.before, div.traceback pre.after { display: none; background: white; }
div.traceback div.source.expanded pre.before,
div.traceback div.source.expanded pre.after {
    display: block;
}

div.traceback div.source.expanded span.ws {
    display: inline;
}

div.traceback blockquote { margin: 1em 0 0 0; padding: 0; white-space: pre-line; }
div.traceback img { float: right; padding: 2px; margin: -3px 2px 0 0; display: none; }
div.traceback img:hover { background-color: #ddd; cursor: pointer;
    border-color: #BFDDE0; }
div.traceback pre:hover img { display: block; }
div.traceback cite.filename { font-style: normal; color: #3B666B; }

/* Interactive console */
pre.console { border: 1px solid #ccc; background: white!important;
    color: black; padding: 5px!important;
    margin: 3px 0 0 0!important; cursor: default!important;
    max-height: 400px; overflow: auto; }
pre.console form { color: #555; }
pre.console input { background-color: transparent; color: #555;
    width: 90%; font-family: monospace; font-size: 14px;
    border: none!important; }

span.string { color: #30799B; }
span.number { color: #9C1A1C; }
span.help { color: #3A7734; }
span.object { color: #485F6E; }
span.extended { opacity: 0.5; }
span.extended:hover { opacity: 1; }
a.toggle { text-decoration: none; background-repeat: no-repeat;
    background-position: center center;
    background-image: url(?__debugger__=yes&cmd=resource&f=more.png); }
a.toggle:hover { background-color: #444; }
a.open { background-image: url(?__debugger__=yes&cmd=resource&f=less.png); }

pre.console div.traceback,
pre.console div.box { margin: 5px 10px; white-space: normal;
    border: 1px solid #11557C; padding: 10px;
    font-family: sans-serif; }
pre.console div.box h3,
pre.console div.traceback h3 { margin: -10px -10px 10px -10px; padding: 5px;
    background: #11557C; color: white; }

pre.console div.traceback pre:hover { cursor: default; background: #E8EFF0; }
pre.console div.traceback pre.syntaxerror { background: inherit; border: none;
    margin: 20px -10px -10px -10px;
    padding: 10px; border-top: 1px solid #BFDDE0;
    background: #E8EFF0; }
pre.console div.noframe-traceback pre.syntaxerror { margin-top: -10px; border: none; }

pre.console div.box pre.repr { padding: 0; margin: 0; background-color: white; border: none; }
pre.console div.box table { margin-top: 6px; }
pre.console div.box pre { border: none; }
pre.console div.box pre.help { background-color: white; }
pre.console div.box pre.help:hover { cursor: default; }
pre.console table tr { vertical-align: top; }
div.console { border: 1px solid #ccc; padding: 4px; background-color: #fafafa; }

div.traceback pre, div.console pre {
    white-space: pre-wrap;       /* css-3 should we be so lucky... */
    white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
    white-space: -pre-wrap;      /* Opera 4-6 ?? */
    white-space: -o-pre-wrap;    /* Opera 7 ?? */
    word-wrap: break-word;       /* Internet Explorer 5.5+ */
    _white-space: pre;           /* IE only hack to re-specify in
                                    addition to word-wrap */
}

/* Full-window overlay shown while the console PIN prompt is active. */
div.pin-prompt {
    position: absolute;
    display: none;
    top: 0;
    bottom: 0;
    left: 0;
    right: 0;
    background: rgba(255, 255, 255, 0.8);
}

div.pin-prompt .inner {
    background: #eee;
    padding: 10px 50px;
    width: 350px;
    margin: 10% auto 0 auto;
    border: 1px solid #ccc;
    border-radius: 2px;
}

div.exc-divider {
    margin: 0.7em 0 0 -1em;
    padding: 0.5em;
    background: #11557C;
    color: #ddd;
    border: 1px solid #ddd;
}

.console.active {
    max-height: 0!important;
    display: none;
}

.hidden {
    display: none;
}
diff --git a/venv/lib/python3.8/site-packages/werkzeug/debug/tbtools.py b/venv/lib/python3.8/site-packages/werkzeug/debug/tbtools.py
new file mode 100644
index 0000000..e81ed6e
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/debug/tbtools.py
@@ -0,0 +1,450 @@
+from __future__ import annotations
+
+import itertools
+import linecache
+import os
+import re
+import sys
+import sysconfig
+import traceback
+import typing as t
+
+from markupsafe import escape
+
+from ..utils import cached_property
+from .console import Console
+
+HEADER = """\
+<!doctype html>
+<html lang=en>
+ <head>
+ <title>%(title)s // Werkzeug Debugger</title>
+ <link rel="stylesheet" href="?__debugger__=yes&amp;cmd=resource&amp;f=style.css">
+ <link rel="shortcut icon"
+ href="?__debugger__=yes&amp;cmd=resource&amp;f=console.png">
+ <script src="?__debugger__=yes&amp;cmd=resource&amp;f=debugger.js"></script>
+ <script>
+ var CONSOLE_MODE = %(console)s,
+ EVALEX = %(evalex)s,
+ EVALEX_TRUSTED = %(evalex_trusted)s,
+ SECRET = "%(secret)s";
+ </script>
+ </head>
+ <body style="background-color: #fff">
+ <div class="debugger">
+"""
+
+FOOTER = """\
+ <div class="footer">
+ Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
+ friendly Werkzeug powered traceback interpreter.
+ </div>
+ </div>
+
+ <div class="pin-prompt">
+ <div class="inner">
+ <h3>Console Locked</h3>
+ <p>
+ The console is locked and needs to be unlocked by entering the PIN.
+ You can find the PIN printed out on the standard output of your
+ shell that runs the server.
+ <form>
+ <p>PIN:
+ <input type=text name=pin size=14>
+ <input type=submit name=btn value="Confirm Pin">
+ </form>
+ </div>
+ </div>
+ </body>
+</html>
+"""
+
+PAGE_HTML = (
+ HEADER
+ + """\
+<h1>%(exception_type)s</h1>
+<div class="detail">
+ <p class="errormsg">%(exception)s</p>
+</div>
+<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
+%(summary)s
+<div class="plain">
+ <p>
+ This is the Copy/Paste friendly version of the traceback.
+ </p>
+ <textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
+</div>
+<div class="explanation">
+ The debugger caught an exception in your WSGI application. You can now
+ look at the traceback which led to the error. <span class="nojavascript">
+ If you enable JavaScript you can also use additional features such as code
+ execution (if the evalex feature is enabled), automatic pasting of the
+ exceptions and much more.</span>
+</div>
+"""
+ + FOOTER
+ + """
+<!--
+
+%(plaintext_cs)s
+
+-->
+"""
+)
+
+CONSOLE_HTML = (
+ HEADER
+ + """\
+<h1>Interactive Console</h1>
+<div class="explanation">
+In this console you can execute Python expressions in the context of the
+application. The initial namespace was created by the debugger automatically.
+</div>
+<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
+"""
+ + FOOTER
+)
+
+SUMMARY_HTML = """\
+<div class="%(classes)s">
+ %(title)s
+ <ul>%(frames)s</ul>
+ %(description)s
+</div>
+"""
+
+FRAME_HTML = """\
+<div class="frame" id="frame-%(id)d">
+ <h4>File <cite class="filename">"%(filename)s"</cite>,
+ line <em class="line">%(lineno)s</em>,
+ in <code class="function">%(function_name)s</code></h4>
+ <div class="source %(library)s">%(lines)s</div>
+</div>
+"""
+
+
def _process_traceback(
    exc: BaseException,
    te: traceback.TracebackException | None = None,
    *,
    skip: int = 0,
    hide: bool = True,
) -> traceback.TracebackException:
    """Build a :class:`traceback.TracebackException` for ``exc`` whose
    stack entries are replaced with :class:`DebugFrameSummary` objects
    that keep the real frame namespaces, recursing into the
    ``__cause__``/``__context__`` chain.

    :param exc: The exception being processed.
    :param te: An already-built ``TracebackException`` to augment; built
        from ``exc`` (without source-line lookup) when not given.
    :param skip: Drop this many frames from the top of the stack.
    :param hide: Honor ``__traceback_hide__`` frame locals (Paste-style
        hiding rules) when true.
    """
    if te is None:
        te = traceback.TracebackException.from_exception(exc, lookup_lines=False)

    # Get the frames the same way StackSummary.extract did, in order
    # to match each frame with the FrameSummary to augment.
    frame_gen = traceback.walk_tb(exc.__traceback__)
    limit = getattr(sys, "tracebacklimit", None)

    if limit is not None:
        if limit < 0:
            limit = 0

        frame_gen = itertools.islice(frame_gen, limit)

    if skip:
        # Keep the frame generator and the FrameSummary list aligned.
        frame_gen = itertools.islice(frame_gen, skip, None)
        del te.stack[:skip]

    new_stack: list[DebugFrameSummary] = []
    hidden = False

    # Match each frame with the FrameSummary that was generated.
    # Hide frames using Paste's __traceback_hide__ rules. Replace
    # all visible FrameSummary with DebugFrameSummary.
    for (f, _), fs in zip(frame_gen, te.stack):
        if hide:
            hide_value = f.f_locals.get("__traceback_hide__", False)

            if hide_value in {"before", "before_and_this"}:
                new_stack = []
                hidden = False

                if hide_value == "before_and_this":
                    continue
            elif hide_value in {"reset", "reset_and_this"}:
                hidden = False

                if hide_value == "reset_and_this":
                    continue
            elif hide_value in {"after", "after_and_this"}:
                hidden = True

                if hide_value == "after_and_this":
                    continue
            elif hide_value or hidden:
                # Any other truthy value hides just this frame; ``hidden``
                # carries the "after" state forward.
                continue

        frame_args: dict[str, t.Any] = {
            "filename": fs.filename,
            "lineno": fs.lineno,
            "name": fs.name,
            "locals": f.f_locals,
            "globals": f.f_globals,
        }

        # Column offsets exist on FrameSummary from Python 3.11 onward.
        if hasattr(fs, "colno"):
            frame_args["colno"] = fs.colno
            frame_args["end_colno"] = fs.end_colno

        new_stack.append(DebugFrameSummary(**frame_args))

    # The codeop module is used to compile code from the interactive
    # debugger. Hide any codeop frames from the bottom of the traceback.
    while new_stack:
        module = new_stack[0].global_ns.get("__name__")

        if module is None:
            module = new_stack[0].local_ns.get("__name__")

        if module == "codeop":
            del new_stack[0]
        else:
            break

    te.stack[:] = new_stack

    # Process chained exceptions the same way (skip only applies to the
    # outermost traceback).
    if te.__context__:
        context_exc = t.cast(BaseException, exc.__context__)
        te.__context__ = _process_traceback(context_exc, te.__context__, hide=hide)

    if te.__cause__:
        cause_exc = t.cast(BaseException, exc.__cause__)
        te.__cause__ = _process_traceback(cause_exc, te.__cause__, hide=hide)

    return te
+
+
class DebugTraceback:
    """A processed exception chain, ready to be rendered either as plain
    text or as the interactive debugger HTML page.
    """

    # NOTE(review): werkzeug's ``cached_property`` appears to store its
    # result under ``_cache_<name>``; the extra slots reserve storage for
    # the two cached properties below — confirm against ``..utils``.
    __slots__ = ("_te", "_cache_all_tracebacks", "_cache_all_frames")

    def __init__(
        self,
        exc: BaseException,
        te: traceback.TracebackException | None = None,
        *,
        skip: int = 0,
        hide: bool = True,
    ) -> None:
        self._te = _process_traceback(exc, te, skip=skip, hide=hide)

    def __str__(self) -> str:
        return f"<{type(self).__name__} {self._te}>"

    @cached_property
    def all_tracebacks(
        self,
    ) -> list[tuple[str | None, traceback.TracebackException]]:
        """All tracebacks in the ``__cause__``/``__context__`` chain,
        outermost exception first, each paired with the standard chaining
        message derived from its cause/context (``None`` when unchained).
        """
        out = []
        current = self._te

        while current is not None:
            if current.__cause__ is not None:
                chained_msg = (
                    "The above exception was the direct cause of the"
                    " following exception"
                )
                chained_exc = current.__cause__
            elif current.__context__ is not None and not current.__suppress_context__:
                chained_msg = (
                    "During handling of the above exception, another"
                    " exception occurred"
                )
                chained_exc = current.__context__
            else:
                chained_msg = None
                chained_exc = None

            out.append((chained_msg, current))
            current = chained_exc

        return out

    @cached_property
    def all_frames(self) -> list[DebugFrameSummary]:
        """Every frame from every traceback in the chain, flattened."""
        return [
            f  # type: ignore[misc]
            for _, te in self.all_tracebacks
            for f in te.stack
        ]

    def render_traceback_text(self) -> str:
        """Render the traceback as plain text (the copy/paste version)."""
        return "".join(self._te.format())

    def render_traceback_html(self, include_title: bool = True) -> str:
        """Render the frames of all chained tracebacks as the
        ``SUMMARY_HTML`` block used inside the debugger page.
        """
        # Only highlight "library" frames when the traceback mixes library
        # and user code; an all-library traceback gets no highlighting.
        library_frames = [f.is_library for f in self.all_frames]
        mark_library = 0 < sum(library_frames) < len(library_frames)
        rows = []

        if not library_frames:
            classes = "traceback noframe-traceback"
        else:
            classes = "traceback"

        # Render oldest exception in the chain first, matching Python's
        # own traceback ordering.
        for msg, current in reversed(self.all_tracebacks):
            row_parts = []

            if msg is not None:
                row_parts.append(f'<li><div class="exc-divider">{msg}:</div>')

            for frame in current.stack:
                frame = t.cast(DebugFrameSummary, frame)
                info = f' title="{escape(frame.info)}"' if frame.info else ""
                row_parts.append(f"<li{info}>{frame.render_html(mark_library)}")

            rows.append("\n".join(row_parts))

        # TracebackException.exc_type was replaced by exc_type_str in 3.13.
        if sys.version_info < (3, 13):
            exc_type_str = self._te.exc_type.__name__
        else:
            exc_type_str = self._te.exc_type_str

        is_syntax_error = exc_type_str == "SyntaxError"

        if include_title:
            if is_syntax_error:
                title = "Syntax Error"
            else:
                title = "Traceback <em>(most recent call last)</em>:"
        else:
            title = ""

        exc_full = escape("".join(self._te.format_exception_only()))

        if is_syntax_error:
            description = f"<pre class=syntaxerror>{exc_full}</pre>"
        else:
            description = f"<blockquote>{exc_full}</blockquote>"

        return SUMMARY_HTML % {
            "classes": classes,
            "title": f"<h3>{title}</h3>",
            "frames": "\n".join(rows),
            "description": description,
        }

    def render_debugger_html(
        self, evalex: bool, secret: str, evalex_trusted: bool
    ) -> str:
        """Render the full debugger page (``PAGE_HTML``) for this
        traceback, embedding the flags the client-side script reads.
        """
        exc_lines = list(self._te.format_exception_only())
        plaintext = "".join(self._te.format())

        if sys.version_info < (3, 13):
            exc_type_str = self._te.exc_type.__name__
        else:
            exc_type_str = self._te.exc_type_str

        return PAGE_HTML % {
            "evalex": "true" if evalex else "false",
            "evalex_trusted": "true" if evalex_trusted else "false",
            "console": "false",
            "title": escape(exc_lines[0]),
            "exception": escape("".join(exc_lines)),
            "exception_type": escape(exc_type_str),
            "summary": self.render_traceback_html(include_title=False),
            "plaintext": escape(plaintext),
            # The plain text is embedded in an HTML comment at the page
            # bottom; collapse "--" runs so it can't end the comment early.
            "plaintext_cs": re.sub("-{2,}", "-", plaintext),
            "secret": secret,
        }
+
+
class DebugFrameSummary(traceback.FrameSummary):
    """A :class:`traceback.FrameSummary` that can evaluate code in the
    frame's namespace.
    """

    __slots__ = (
        "local_ns",
        "global_ns",
        "_cache_info",
        "_cache_is_library",
        "_cache_console",
    )

    def __init__(
        self,
        *,
        locals: dict[str, t.Any],
        globals: dict[str, t.Any],
        **kwargs: t.Any,
    ) -> None:
        # Pass locals=None so the base class does not stringify them; the
        # real namespace dicts are kept for the interactive console.
        super().__init__(locals=None, **kwargs)
        self.local_ns = locals
        self.global_ns = globals

    @cached_property
    def info(self) -> str | None:
        """The frame's ``__traceback_info__`` local, if it set one."""
        return self.local_ns.get("__traceback_info__")

    @cached_property
    def is_library(self) -> bool:
        """Whether the frame's file lives under one of the interpreter's
        install paths (``sysconfig.get_paths()``), i.e. is not user code.
        """
        return any(
            self.filename.startswith((path, os.path.realpath(path)))
            for path in sysconfig.get_paths().values()
        )

    @cached_property
    def console(self) -> Console:
        """Interactive console bound to this frame's namespaces."""
        return Console(self.global_ns, self.local_ns)

    def eval(self, code: str) -> t.Any:
        """Evaluate ``code`` in the frame's namespaces via the console."""
        return self.console.eval(code)

    def render_html(self, mark_library: bool) -> str:
        """Render this frame as a ``FRAME_HTML`` block, with up to five
        context lines before and after the current line.
        """
        context = 5
        lines = linecache.getlines(self.filename)
        line_idx = self.lineno - 1  # type: ignore[operator]
        start_idx = max(0, line_idx - context)
        stop_idx = min(len(lines), line_idx + context + 1)
        rendered_lines = []

        def render_line(line: str, cls: str) -> None:
            line = line.expandtabs().rstrip()
            stripped_line = line.strip()
            prefix = len(line) - len(stripped_line)
            colno = getattr(self, "colno", 0)
            end_colno = getattr(self, "end_colno", 0)

            # With column info (Python 3.11+) draw a ^^^ marker under the
            # offending span of the current line.
            if cls == "current" and colno and end_colno:
                arrow = (
                    f'\n<span class="ws">{" " * prefix}</span>'
                    f'{" " * (colno - prefix)}{"^" * (end_colno - colno)}'
                )
            else:
                arrow = ""

            # Leading whitespace goes into a "ws" span, which the
            # stylesheet hides unless the frame is expanded.
            rendered_lines.append(
                f'<pre class="line {cls}"><span class="ws">{" " * prefix}</span>'
                f"{escape(stripped_line) if stripped_line else ' '}"
                f"{arrow if arrow else ''}</pre>"
            )

        # NOTE(review): assumes ``self.lineno`` is a valid 1-based line of
        # the cached file whenever the file has lines — confirm callers.
        if lines:
            for line in lines[start_idx:line_idx]:
                render_line(line, "before")

            render_line(lines[line_idx], "current")

            for line in lines[line_idx + 1 : stop_idx]:
                render_line(line, "after")

        return FRAME_HTML % {
            "id": id(self),
            "filename": escape(self.filename),
            "lineno": self.lineno,
            "function_name": escape(self.name),
            "lines": "\n".join(rendered_lines),
            "library": "library" if mark_library and self.is_library else "",
        }
+
+
def render_console_html(secret: str, evalex_trusted: bool) -> str:
    """Render the standalone interactive console page.

    :param secret: Token interpolated into the page's ``SECRET``
        JavaScript variable.
    :param evalex_trusted: Whether code execution is enabled without a
        PIN prompt; becomes the ``EVALEX_TRUSTED`` flag on the page.
    """
    values = {
        "evalex": "true",
        "evalex_trusted": "true" if evalex_trusted else "false",
        "console": "true",
        "title": "Console",
        "secret": secret,
    }
    return CONSOLE_HTML % values
diff --git a/venv/lib/python3.8/site-packages/werkzeug/exceptions.py b/venv/lib/python3.8/site-packages/werkzeug/exceptions.py
new file mode 100644
index 0000000..6ce7ef9
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/exceptions.py
@@ -0,0 +1,881 @@
+"""Implements a number of Python exceptions which can be raised from within
+a view to trigger a standard HTTP non-200 response.
+
+Usage Example
+-------------
+
+.. code-block:: python
+
+ from werkzeug.wrappers.request import Request
+ from werkzeug.exceptions import HTTPException, NotFound
+
+ def view(request):
+ raise NotFound()
+
+ @Request.application
+ def application(request):
+ try:
+ return view(request)
+ except HTTPException as e:
+ return e
+
+As you can see from this example those exceptions are callable WSGI
+applications. However, they are not Werkzeug response objects. You
+can get a response object by calling ``get_response()`` on a HTTP
+exception.
+
+Keep in mind that you may have to pass an environ (WSGI) or scope
+(ASGI) to ``get_response()`` because some errors fetch additional
+information relating to the request.
+
+If you want to hook in a different exception page to say, a 404 status
+code, you can add a second except for a specific subclass of an error:
+
+.. code-block:: python
+
+ @Request.application
+ def application(request):
+ try:
+ return view(request)
+ except NotFound as e:
+ return not_found(request)
+ except HTTPException as e:
+ return e
+
+"""
+
+from __future__ import annotations
+
+import typing as t
+from datetime import datetime
+
+from markupsafe import escape
+from markupsafe import Markup
+
+from ._internal import _get_environ
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIEnvironment
+
+ from .datastructures import WWWAuthenticate
+ from .sansio.response import Response
+ from .wrappers.request import Request as WSGIRequest
+ from .wrappers.response import Response as WSGIResponse
+
+
class HTTPException(Exception):
    """The base class for all HTTP exceptions. This exception can be called as a WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.

    .. versionchanged:: 2.1
        Removed the ``wrap`` class method.
    """

    #: The HTTP status code; ``None`` on this base class, set by subclasses.
    code: int | None = None
    #: Default body text; can be overridden per instance via ``__init__``.
    description: str | None = None

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
    ) -> None:
        super().__init__()
        if description is not None:
            self.description = description
        # Optional pre-built response returned as-is by ``get_response``.
        self.response = response

    @property
    def name(self) -> str:
        """The status name."""
        from .http import HTTP_STATUS_CODES

        return HTTP_STATUS_CODES.get(self.code, "Unknown Error")  # type: ignore

    def get_description(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> str:
        """Get the description."""
        if self.description is None:
            description = ""
        else:
            description = self.description

        # Escape first, then re-insert <br> as Markup so newlines render
        # as line breaks without allowing other markup through.
        description = escape(description).replace("\n", Markup("<br>"))
        return f"<p>{description}</p>"

    def get_body(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> str:
        """Get the HTML body."""
        # NOTE(review): ``scope`` is not forwarded to ``get_description``
        # here, only ``environ`` — confirm whether that is intentional.
        return (
            "<!doctype html>\n"
            "<html lang=en>\n"
            f"<title>{self.code} {escape(self.name)}</title>\n"
            f"<h1>{escape(self.name)}</h1>\n"
            f"{self.get_description(environ)}\n"
        )

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> list[tuple[str, str]]:
        """Get a list of headers."""
        return [("Content-Type", "text/html; charset=utf-8")]

    def get_response(
        self,
        environ: WSGIEnvironment | WSGIRequest | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> Response:
        """Get a response object. If one was passed to the exception
        it's returned directly.

        :param environ: the optional environ for the request. This
                        can be used to modify the response depending
                        on how the request looked like.
        :return: a :class:`Response` object or a subclass thereof.
        """
        from .wrappers.response import Response as WSGIResponse  # noqa: F811

        if self.response is not None:
            return self.response
        if environ is not None:
            # Accepts either a Request object or a raw WSGI environ dict;
            # normalize to an environ before building headers/body.
            environ = _get_environ(environ)
        headers = self.get_headers(environ, scope)
        return WSGIResponse(self.get_body(environ, scope), self.code, headers)

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Call the exception as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = t.cast("WSGIResponse", self.get_response(environ))
        return response(environ, start_response)

    def __str__(self) -> str:
        code = self.code if self.code is not None else "???"
        return f"{code} {self.name}: {self.description}"

    def __repr__(self) -> str:
        code = self.code if self.code is not None else "???"
        return f"<{type(self).__name__} '{code}: {self.name}'>"
+
+
class BadRequest(HTTPException):
    """*400* `Bad Request`

    Raise if the browser sends something to the application the application
    or server cannot handle.
    """

    # Status code and default body for the rendered error page.
    code = 400
    description = (
        "The browser (or proxy) sent a request that this server could "
        "not understand."
    )
+
+
class BadRequestKeyError(BadRequest, KeyError):
    """An exception that is used to signal both a :exc:`KeyError` and a
    :exc:`BadRequest`. Used by many of the datastructures.
    """

    # Backing storage for the ``description`` property below; the class
    # attribute inherited from BadRequest is shadowed by that property.
    _description = BadRequest.description
    #: Show the KeyError along with the HTTP error message in the
    #: response. This should be disabled in production, but can be
    #: useful in a debug mode.
    show_exception = False

    def __init__(self, arg: str | None = None, *args: t.Any, **kwargs: t.Any):
        super().__init__(*args, **kwargs)

        # Initialize the KeyError side with the missing key (if given) so
        # ``KeyError.__str__`` reports it.
        if arg is None:
            KeyError.__init__(self)
        else:
            KeyError.__init__(self, arg)

    @property  # type: ignore
    def description(self) -> str:
        # Optionally append the missing-key detail to the generic 400 text.
        if self.show_exception:
            return (
                f"{self._description}\n"
                f"{KeyError.__name__}: {KeyError.__str__(self)}"
            )

        return self._description

    @description.setter
    def description(self, value: str) -> None:
        self._description = value
+
+
# The next three exceptions are 400 variants (inherit code/description
# from BadRequest) used to distinguish specific failure causes.


class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client. Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server. Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.

    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent this might or might not be raised if a client
    is gone.

    .. versionadded:: 0.8
    """


class SecurityError(BadRequest):
    """Raised if something triggers a security error. This is otherwise
    exactly like a bad request error.

    .. versionadded:: 0.9
    """


class BadHost(BadRequest):
    """Raised if the submitted host is badly formatted.

    .. versionadded:: 0.11.2
    """
+
+
class Unauthorized(HTTPException):
    """*401* ``Unauthorized``

    Raise if the user is not authorized to access a resource.

    The ``www_authenticate`` argument should be used to set the
    ``WWW-Authenticate`` header. This is used for HTTP basic auth and
    other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate`
    to create correctly formatted values. Strictly speaking a 401
    response is invalid if it doesn't provide at least one value for
    this header, although real clients typically don't care.

    :param description: Override the default message used for the body
        of the response.
    :param www-authenticate: A single value, or list of values, for the
        WWW-Authenticate header(s).

    .. versionchanged:: 2.0
        Serialize multiple ``www_authenticate`` items into multiple
        ``WWW-Authenticate`` headers, rather than joining them
        into a single value, for better interoperability.

    .. versionchanged:: 0.15.3
        If the ``www_authenticate`` argument is not set, the
        ``WWW-Authenticate`` header is not set.

    .. versionchanged:: 0.15.3
        The ``response`` argument was restored.

    .. versionchanged:: 0.15.1
        ``description`` was moved back as the first argument, restoring
         its previous position.

    .. versionchanged:: 0.15.0
        ``www_authenticate`` was added as the first argument, ahead of
        ``description``.
    """

    code = 401
    description = (
        "The server could not verify that you are authorized to access"
        " the URL requested. You either supplied the wrong credentials"
        " (e.g. a bad password), or your browser doesn't understand"
        " how to supply the credentials required."
    )

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
        www_authenticate: None | (WWWAuthenticate | t.Iterable[WWWAuthenticate]) = None,
    ) -> None:
        super().__init__(description, response)

        from .datastructures import WWWAuthenticate

        # Accept a single WWWAuthenticate value as a convenience and
        # normalize to an iterable so get_headers can always iterate.
        if isinstance(www_authenticate, WWWAuthenticate):
            www_authenticate = (www_authenticate,)

        self.www_authenticate = www_authenticate

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> list[tuple[str, str]]:
        headers = super().get_headers(environ, scope)
        # One WWW-Authenticate header per value rather than a joined list.
        if self.www_authenticate:
            headers.extend(("WWW-Authenticate", str(x)) for x in self.www_authenticate)
        return headers
+
+
class Forbidden(HTTPException):
    """*403* `Forbidden`

    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """

    # Authenticated but not allowed (contrast with 401 Unauthorized).
    code = 403
    description = (
        "You don't have the permission to access the requested"
        " resource. It is either read-protected or not readable by the"
        " server."
    )


class NotFound(HTTPException):
    """*404* `Not Found`

    Raise if a resource does not exist and never existed.
    """

    code = 404
    description = (
        "The requested URL was not found on the server. If you entered"
        " the URL manually please check your spelling and try again."
    )
+
+
class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`

    Raise if the server used a method the resource does not handle. For
    example `POST` if the resource is view only. Especially useful for REST.

    The first argument for this exception should be a list of allowed methods.
    Strictly speaking the response would be invalid if you don't provide valid
    methods in the header which you can do with that list.
    """

    code = 405
    description = "The method is not allowed for the requested URL."

    def __init__(
        self,
        valid_methods: t.Iterable[str] | None = None,
        description: str | None = None,
        response: Response | None = None,
    ) -> None:
        """Takes an optional list of valid http methods
        starting with werkzeug 0.3 the list will be mandatory."""
        super().__init__(description=description, response=response)
        # Methods advertised in the Allow header (when provided).
        self.valid_methods = valid_methods

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> list[tuple[str, str]]:
        headers = super().get_headers(environ, scope)
        # A 405 response should carry the Allow header listing the methods.
        if self.valid_methods:
            headers.append(("Allow", ", ".join(self.valid_methods)))
        return headers
+
+
# 406, 408 and 409: simple status-code subclasses that only set
# ``code`` and a default ``description``.


class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`

    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """

    code = 406
    description = (
        "The resource identified by the request is only capable of"
        " generating response entities which have content"
        " characteristics not acceptable according to the accept"
        " headers sent in the request."
    )


class RequestTimeout(HTTPException):
    """*408* `Request Timeout`

    Raise to signalize a timeout.
    """

    code = 408
    description = (
        "The server closed the network connection because the browser"
        " didn't finish the request within the specified time."
    )


class Conflict(HTTPException):
    """*409* `Conflict`

    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.

    .. versionadded:: 0.7
    """

    code = 409
    description = (
        "A conflict happened while processing the request. The"
        " resource might have been modified while the request was being"
        " processed."
    )
+
+
# 410–415: simple status-code subclasses that only set ``code`` and a
# default ``description``.


class Gone(HTTPException):
    """*410* `Gone`

    Raise if a resource existed previously and went away without new location.
    """

    code = 410
    description = (
        "The requested URL is no longer available on this server and"
        " there is no forwarding address. If you followed a link from a"
        " foreign page, please contact the author of this page."
    )


class LengthRequired(HTTPException):
    """*411* `Length Required`

    Raise if the browser submitted data but no ``Content-Length`` header which
    is required for the kind of processing the server does.
    """

    code = 411
    description = (
        "A request with this method requires a valid <code>Content-"
        "Length</code> header."
    )


class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`

    Status code used in combination with ``If-Match``, ``If-None-Match``, or
    ``If-Unmodified-Since``.
    """

    code = 412
    description = (
        "The precondition on the request for the URL failed positive evaluation."
    )


class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`

    The status code one should return if the data submitted exceeded a given
    limit.
    """

    code = 413
    description = "The data value transmitted exceeds the capacity limit."


class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`

    Like *413* but for too long URLs.
    """

    code = 414
    description = (
        "The length of the requested URL exceeds the capacity limit for"
        " this server. The request cannot be processed."
    )


class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`

    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """

    code = 415
    description = (
        "The server does not support the media type transmitted in the request."
    )
+
+
class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`

    The client asked for an invalid part of the file.

    .. versionadded:: 0.7
    """

    code = 416
    description = "The server cannot provide the requested range."

    def __init__(
        self,
        length: int | None = None,
        units: str = "bytes",
        description: str | None = None,
        response: Response | None = None,
    ) -> None:
        """Takes an optional `Content-Range` header value based on ``length``
        parameter.
        """
        super().__init__(description=description, response=response)
        # Total resource length and range unit for the Content-Range header.
        self.length = length
        self.units = units

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> list[tuple[str, str]]:
        headers = super().get_headers(environ, scope)
        # "<units> */<length>" means: unsatisfied range, full length known.
        if self.length is not None:
            headers.append(("Content-Range", f"{self.units} */{self.length}"))
        return headers
+
+
class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`

    Raised when the server cannot satisfy the ``Expect`` request header.

    .. versionadded:: 0.7
    """

    code = 417
    description = "The server could not meet the requirements of the Expect header"
+
+
class ImATeapot(HTTPException):
    """*418* `I'm a teapot`

    Returned by a teapot that was asked to brew coffee (see RFC 2324).

    .. versionadded:: 0.7
    """

    code = 418
    description = "This server is a teapot, not a coffee machine"
+
+
class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`

    Returned for a syntactically well-formed request whose instructions
    are semantically wrong.
    """

    code = 422
    description = (
        "The request was well-formed but was unable to be followed due to"
        " semantic errors."
    )
+
+
class Locked(HTTPException):
    """*423* `Locked`

    Raised when the requested resource is locked.
    """

    code = 423
    description = "The resource that is being accessed is locked."
+
+
class FailedDependency(HTTPException):
    """*424* `Failed Dependency`

    Raised when the requested action could not be performed because it
    depended on another action that failed.
    """

    code = 424
    description = (
        "The method could not be performed on the resource because the requested"
        " action depended on another action and that action failed."
    )
+
+
class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`

    The server requires the request to be conditional, typically to avoid
    the lost-update problem: a race between two or more clients updating a
    resource via PUT or DELETE. Requiring a conditional header
    ("If-Match" or "If-Unmodified-Since") with a value retained from a
    recent GET guarantees each client has seen the previous revision.
    """

    code = 428
    description = (
        'This request is required to be conditional; try using "If-Match"'
        ' or "If-Unmodified-Since".'
    )
+
+
class _RetryAfter(HTTPException):
    """Base class for errors that may carry a ``Retry-After`` header.

    ``retry_after`` may be an :class:`int` number of seconds or a
    :class:`~datetime.datetime` after which the client may retry.
    """

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
        retry_after: datetime | int | None = None,
    ) -> None:
        super().__init__(description, response)
        self.retry_after = retry_after

    def get_headers(
        self,
        environ: WSGIEnvironment | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> list[tuple[str, str]]:
        headers = super().get_headers(environ, scope)

        # Falsy values (None, 0) suppress the header entirely.
        if not self.retry_after:
            return headers

        if isinstance(self.retry_after, datetime):
            # Imported here to avoid a circular import with .http.
            from .http import http_date

            value = http_date(self.retry_after)
        else:
            value = str(self.retry_after)

        headers.append(("Retry-After", value))
        return headers
+
+
class TooManyRequests(_RetryAfter):
    """*429* `Too Many Requests`

    The client has exceeded the rate at which the server is willing to
    answer its requests (the server may identify users and measure rates
    however it likes). A "Retry-After" header may tell the client how
    long to wait before retrying.

    :param retry_after: If given, set the ``Retry-After`` header to this
        value. May be an :class:`int` number of seconds or a
        :class:`~datetime.datetime`.

    .. versionchanged:: 1.0
        Added ``retry_after`` parameter.
    """

    code = 429
    description = "This user has exceeded an allotted request count. Try again later."
+
+
class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`

    Raised when a single header field, or the set of all header fields,
    is too large for the server to process.
    """

    code = 431
    description = "One or more header fields exceeds the maximum size."
+
+
class UnavailableForLegalReasons(HTTPException):
    """*451* `Unavailable For Legal Reasons`

    Access to the resource is denied as a consequence of a legal demand.
    """

    code = 451
    description = "Unavailable for legal reasons."
+
+
class InternalServerError(HTTPException):
    """*500* `Internal Server Error`

    Raised for unexpected internal errors; a good catch-all for unknown
    failures in the dispatcher.

    .. versionchanged:: 1.0.0
        Added the :attr:`original_exception` attribute.
    """

    code = 500
    description = (
        "The server encountered an internal error and was unable to complete"
        " your request. Either the server is overloaded or there is an error"
        " in the application."
    )

    def __init__(
        self,
        description: str | None = None,
        response: Response | None = None,
        original_exception: BaseException | None = None,
    ) -> None:
        #: The original exception that caused this 500 error. Can be
        #: used by frameworks to provide context when handling
        #: unexpected errors.
        self.original_exception = original_exception
        super().__init__(description=description, response=response)
+
+
# NOTE: intentionally shadows the ``NotImplemented`` builtin; the class
# name is part of the public API and must stay.
class NotImplemented(HTTPException):
    """*501* `Not Implemented`

    Raised when the application does not support the action the client
    requested.
    """

    code = 501
    description = "The server does not support the action requested by the browser."
+
+
class BadGateway(HTTPException):
    """*502* `Bad Gateway`

    For proxying applications: return this when the upstream server
    consulted to fulfill the request gave an invalid response.
    """

    code = 502
    description = "The proxy server received an invalid response from an upstream server."
+
+
class ServiceUnavailable(_RetryAfter):
    """*503* `Service Unavailable`

    Returned when a service is temporarily unavailable.

    :param retry_after: If given, set the ``Retry-After`` header to this
        value. May be an :class:`int` number of seconds or a
        :class:`~datetime.datetime`.

    .. versionchanged:: 1.0
        Added ``retry_after`` parameter.
    """

    code = 503
    description = (
        "The server is temporarily unable to service your request due to"
        " maintenance downtime or capacity problems. Please try again later."
    )
+
+
class GatewayTimeout(HTTPException):
    """*504* `Gateway Timeout`

    Returned when a connection to an upstream server times out.
    """

    code = 504
    description = "The connection to an upstream server timed out."
+
+
class HTTPVersionNotSupported(HTTPException):
    """*505* `HTTP Version Not Supported`

    Raised when the request uses an HTTP protocol version the server
    does not support.
    """

    code = 505
    description = (
        "The server does not support the HTTP protocol version used in the request."
    )
+
+
# Status code -> default exception class, populated by _find_exceptions().
default_exceptions: dict[int, type[HTTPException]] = {}


def _find_exceptions() -> None:
    """Register every ``HTTPException`` subclass in this module under its
    status code, preferring the most specific subclass per code.
    """
    for obj in globals().values():
        try:
            if not issubclass(obj, HTTPException):
                continue
        except TypeError:
            # Not a class at all.
            continue

        if obj.code is None:
            continue

        existing = default_exceptions.get(obj.code)

        # Keep an already-registered class when this one specializes it.
        if existing is not None and issubclass(obj, existing):
            continue

        default_exceptions[obj.code] = obj


_find_exceptions()
del _find_exceptions
+
+
class Aborter:
    """Raises HTTP exceptions when called.

    Calling with an integer looks it up in the code -> exception mapping
    and raises the matching exception; calling with a response object
    raises a proxy :class:`HTTPException` wrapping it. Remaining
    arguments are forwarded to the exception constructor.
    """

    def __init__(
        self,
        mapping: dict[int, type[HTTPException]] | None = None,
        extra: dict[int, type[HTTPException]] | None = None,
    ) -> None:
        base = default_exceptions if mapping is None else mapping
        self.mapping = dict(base)

        if extra is not None:
            self.mapping.update(extra)

    def __call__(
        self, code: int | Response, *args: t.Any, **kwargs: t.Any
    ) -> t.NoReturn:
        # Imported here to avoid a circular import.
        from .sansio.response import Response

        if isinstance(code, Response):
            raise HTTPException(response=code)

        exc_type = self.mapping.get(code)

        if exc_type is None:
            raise LookupError(f"no exception for {code!r}")

        raise exc_type(*args, **kwargs)
+
+
def abort(status: int | Response, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
    """Raise an :py:exc:`HTTPException` for the given status code or
    response object.

    An integer status is looked up in the table of known exceptions and
    the match is raised; a response object is wrapped in a proxy
    exception and raised::

        abort(404)  # 404 Not Found
        abort(Response('Hello World'))

    """
    _aborter(status, *args, **kwargs)


_aborter: Aborter = Aborter()
diff --git a/venv/lib/python3.8/site-packages/werkzeug/formparser.py b/venv/lib/python3.8/site-packages/werkzeug/formparser.py
new file mode 100644
index 0000000..3c6875e
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/formparser.py
@@ -0,0 +1,430 @@
+from __future__ import annotations
+
+import typing as t
+from io import BytesIO
+from urllib.parse import parse_qsl
+
+from ._internal import _plain_int
+from .datastructures import FileStorage
+from .datastructures import Headers
+from .datastructures import MultiDict
+from .exceptions import RequestEntityTooLarge
+from .http import parse_options_header
+from .sansio.multipart import Data
+from .sansio.multipart import Epilogue
+from .sansio.multipart import Field
+from .sansio.multipart import File
+from .sansio.multipart import MultipartDecoder
+from .sansio.multipart import NeedData
+from .wsgi import get_content_length
+from .wsgi import get_input_stream
+
+# there are some platforms where SpooledTemporaryFile is not available.
+# In that case we need to provide a fallback.
+try:
+ from tempfile import SpooledTemporaryFile
+except ImportError:
+ from tempfile import TemporaryFile
+
+ SpooledTemporaryFile = None # type: ignore
+
+if t.TYPE_CHECKING:
+ import typing as te
+
+ from _typeshed.wsgi import WSGIEnvironment
+
+ t_parse_result = t.Tuple[
+ t.IO[bytes], MultiDict[str, str], MultiDict[str, FileStorage]
+ ]
+
+ class TStreamFactory(te.Protocol):
+ def __call__(
+ self,
+ total_content_length: int | None,
+ content_type: str | None,
+ filename: str | None,
+ content_length: int | None = None,
+ ) -> t.IO[bytes]: ...
+
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
+
def default_stream_factory(
    total_content_length: int | None,
    content_type: str | None,
    filename: str | None,
    content_length: int | None = None,
) -> t.IO[bytes]:
    """Return a writable binary stream for an uploaded part.

    Uses a :class:`~tempfile.SpooledTemporaryFile` when available (data
    stays in memory up to 500 kB, then spills to disk). Otherwise falls
    back to a plain temporary file for large or unknown sizes, or an
    in-memory buffer for small ones.
    """
    max_size = 1024 * 500

    if SpooledTemporaryFile is not None:
        return t.cast(t.IO[bytes], SpooledTemporaryFile(max_size=max_size, mode="rb+"))

    if total_content_length is None or total_content_length > max_size:
        return t.cast(t.IO[bytes], TemporaryFile("rb+"))

    return BytesIO()
+
+
def parse_form_data(
    environ: WSGIEnvironment,
    stream_factory: TStreamFactory | None = None,
    max_form_memory_size: int | None = None,
    max_content_length: int | None = None,
    cls: type[MultiDict[str, t.Any]] | None = None,
    silent: bool = True,
    *,
    max_form_parts: int | None = None,
) -> t_parse_result:
    """Parse the form data in ``environ`` and return it as a
    ``(stream, form, files)`` tuple. Only call this for `POST`, `PUT`,
    or `PATCH` requests.

    For ``multipart/form-data`` bodies the files multidict is filled
    with `FileStorage` objects. For an unknown mimetype the wrapped
    input stream is returned untouched as the first item; otherwise the
    stream is empty. This is a shortcut for the common usage of
    :class:`FormDataParser`.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
        writeable file descriptor, like :meth:`Response._get_file_stream`.
    :param max_form_memory_size: the maximum number of bytes accepted for
        in-memory stored form data; exceeding it raises
        :exc:`~exceptions.RequestEntityTooLarge`.
    :param max_content_length: if given and the transmitted data is longer,
        :exc:`~exceptions.RequestEntityTooLarge` is raised.
    :param cls: an optional dict class to use; defaults to :class:`MultiDict`.
    :param silent: If set to False parsing errors will not be caught.
    :param max_form_parts: The maximum number of multipart parts to be parsed;
        exceeding it raises :exc:`~exceptions.RequestEntityTooLarge`.
    :return: A tuple in the form ``(stream, form, files)``.

    .. versionchanged:: 3.0
        The ``charset`` and ``errors`` parameters were removed.

    .. versionchanged:: 2.3
        Added the ``max_form_parts`` parameter.

    .. versionadded:: 0.5.1
        Added the ``silent`` parameter.

    .. versionadded:: 0.5
        Added the ``max_form_memory_size``, ``max_content_length``, and ``cls``
        parameters.
    """
    parser = FormDataParser(
        stream_factory=stream_factory,
        max_form_memory_size=max_form_memory_size,
        max_content_length=max_content_length,
        max_form_parts=max_form_parts,
        silent=silent,
        cls=cls,
    )
    return parser.parse_from_environ(environ)
+
+
class FormDataParser:
    """This class implements parsing of form data for Werkzeug. By itself
    it can parse multipart and url encoded form data. It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.

    :param stream_factory: An optional callable that returns a new read and
        writeable file descriptor. This callable works
        the same as :meth:`Response._get_file_stream`.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
        in-memory stored form data. If the data
        exceeds the value specified an
        :exc:`~exceptions.RequestEntityTooLarge`
        exception is raised.
    :param max_content_length: If this is provided and the transmitted data
        is longer than this value an
        :exc:`~exceptions.RequestEntityTooLarge`
        exception is raised.
    :param cls: an optional dict class to use. If this is not specified
        or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :param max_form_parts: The maximum number of multipart parts to be parsed. If this
        is exceeded, a :exc:`~exceptions.RequestEntityTooLarge` exception is raised.

    .. versionchanged:: 3.0
        The ``charset`` and ``errors`` parameters were removed.

    .. versionchanged:: 3.0
        The ``parse_functions`` attribute and ``get_parse_func`` methods were removed.

    .. versionchanged:: 2.2.3
        Added the ``max_form_parts`` parameter.

    .. versionadded:: 0.8
    """

    def __init__(
        self,
        stream_factory: TStreamFactory | None = None,
        max_form_memory_size: int | None = None,
        max_content_length: int | None = None,
        cls: type[MultiDict[str, t.Any]] | None = None,
        silent: bool = True,
        *,
        max_form_parts: int | None = None,
    ) -> None:
        # Default factory spools part bodies to memory/temp files.
        if stream_factory is None:
            stream_factory = default_stream_factory

        self.stream_factory = stream_factory
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length
        self.max_form_parts = max_form_parts

        # Container type used for both the form and files multidicts.
        if cls is None:
            cls = t.cast("type[MultiDict[str, t.Any]]", MultiDict)

        self.cls = cls
        self.silent = silent

    def parse_from_environ(self, environ: WSGIEnvironment) -> t_parse_result:
        """Parses the information from the environment as form data.

        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        # get_input_stream enforces max_content_length while reading.
        stream = get_input_stream(environ, max_content_length=self.max_content_length)
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(environ.get("CONTENT_TYPE"))
        return self.parse(
            stream,
            content_length=content_length,
            mimetype=mimetype,
            options=options,
        )

    def parse(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: int | None,
        options: dict[str, str] | None = None,
    ) -> t_parse_result:
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.

        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.

        .. versionchanged:: 3.0
            The invalid ``application/x-url-encoded`` content type is not
            treated as ``application/x-www-form-urlencoded``.
        """
        # Dispatch on mimetype; unknown types pass the stream through
        # untouched with empty form/files containers.
        if mimetype == "multipart/form-data":
            parse_func = self._parse_multipart
        elif mimetype == "application/x-www-form-urlencoded":
            parse_func = self._parse_urlencoded
        else:
            return stream, self.cls(), self.cls()

        if options is None:
            options = {}

        try:
            return parse_func(stream, mimetype, content_length, options)
        except ValueError:
            # With silent=True (the default), a malformed body yields
            # empty results instead of propagating the error.
            if not self.silent:
                raise

        return stream, self.cls(), self.cls()

    def _parse_multipart(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: int | None,
        options: dict[str, str],
    ) -> t_parse_result:
        # Delegates the heavy lifting to MultiPartParser; this method
        # only extracts and validates the boundary parameter.
        parser = MultiPartParser(
            stream_factory=self.stream_factory,
            max_form_memory_size=self.max_form_memory_size,
            max_form_parts=self.max_form_parts,
            cls=self.cls,
        )
        boundary = options.get("boundary", "").encode("ascii")

        if not boundary:
            raise ValueError("Missing boundary")

        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files

    def _parse_urlencoded(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: int | None,
        options: dict[str, str],
    ) -> t_parse_result:
        # The whole urlencoded body is read into memory, so enforce the
        # memory limit up front using the declared content length.
        if (
            self.max_form_memory_size is not None
            and content_length is not None
            and content_length > self.max_form_memory_size
        ):
            raise RequestEntityTooLarge()

        items = parse_qsl(
            stream.read().decode(),
            keep_blank_values=True,
            errors="werkzeug.url_quote",
        )
        return stream, self.cls(items), self.cls()
+
+
class MultiPartParser:
    """Parses ``multipart/form-data`` bodies into form fields and uploaded
    files, driven by the event stream of a sansio
    :class:`MultipartDecoder`.
    """

    def __init__(
        self,
        stream_factory: TStreamFactory | None = None,
        max_form_memory_size: int | None = None,
        cls: type[MultiDict[str, t.Any]] | None = None,
        buffer_size: int = 64 * 1024,
        max_form_parts: int | None = None,
    ) -> None:
        self.max_form_memory_size = max_form_memory_size
        self.max_form_parts = max_form_parts

        # Factory producing the writable stream each file part is
        # written to as it arrives.
        if stream_factory is None:
            stream_factory = default_stream_factory

        self.stream_factory = stream_factory

        if cls is None:
            cls = t.cast("type[MultiDict[str, t.Any]]", MultiDict)

        self.cls = cls
        # Number of bytes read from the input stream per iteration.
        self.buffer_size = buffer_size

    def fail(self, message: str) -> te.NoReturn:
        """Raise a :exc:`ValueError` with the given message."""
        raise ValueError(message)

    def get_part_charset(self, headers: Headers) -> str:
        """Return the charset to decode a field part with, defaulting to
        UTF-8 when none (or an unsupported one) is declared.
        """
        # Figure out input charset for current part
        content_type = headers.get("content-type")

        if content_type:
            parameters = parse_options_header(content_type)[1]
            ct_charset = parameters.get("charset", "").lower()

            # A safe list of encodings. Modern clients should only send ASCII or UTF-8.
            # This list will not be extended further.
            if ct_charset in {"ascii", "us-ascii", "utf-8", "iso-8859-1"}:
                return ct_charset

        return "utf-8"

    def start_file_streaming(
        self, event: File, total_content_length: int | None
    ) -> t.IO[bytes]:
        """Create the output stream for a file part via the stream factory."""
        content_type = event.headers.get("content-type")

        # A missing or malformed per-part Content-Length is treated as 0.
        try:
            content_length = _plain_int(event.headers["content-length"])
        except (KeyError, ValueError):
            content_length = 0

        container = self.stream_factory(
            total_content_length=total_content_length,
            filename=event.filename,
            content_type=content_type,
            content_length=content_length,
        )
        return container

    def parse(
        self, stream: t.IO[bytes], boundary: bytes, content_length: int | None
    ) -> tuple[MultiDict[str, str], MultiDict[str, FileStorage]]:
        """Feed the stream through the decoder and collect ``(form, files)``
        multidicts. Field data is buffered in memory; file data is written
        to factory-provided streams as it arrives.
        """
        current_part: Field | File
        field_size: int | None = None
        container: t.IO[bytes] | list[bytes]
        _write: t.Callable[[bytes], t.Any]

        parser = MultipartDecoder(
            boundary,
            max_form_memory_size=self.max_form_memory_size,
            max_parts=self.max_form_parts,
        )

        fields = []
        files = []

        for data in _chunk_iter(stream.read, self.buffer_size):
            parser.receive_data(data)
            event = parser.next_event()
            # Drain all events produced by this chunk; NeedData means
            # feed more input, Epilogue means the body is finished.
            while not isinstance(event, (Epilogue, NeedData)):
                if isinstance(event, Field):
                    current_part = event
                    field_size = 0
                    container = []
                    _write = container.append
                elif isinstance(event, File):
                    current_part = event
                    # field_size=None disables the in-memory size check
                    # below, since file data goes to a stream.
                    field_size = None
                    container = self.start_file_streaming(event, content_length)
                    _write = container.write
                elif isinstance(event, Data):
                    if self.max_form_memory_size is not None and field_size is not None:
                        # Ensure that accumulated data events do not exceed limit.
                        # Also checked within single event in MultipartDecoder.
                        field_size += len(event.data)

                        if field_size > self.max_form_memory_size:
                            raise RequestEntityTooLarge()

                    _write(event.data)
                    if not event.more_data:
                        if isinstance(current_part, Field):
                            value = b"".join(container).decode(
                                self.get_part_charset(current_part.headers), "replace"
                            )
                            fields.append((current_part.name, value))
                        else:
                            container = t.cast(t.IO[bytes], container)
                            # Rewind so consumers read from the start.
                            container.seek(0)
                            files.append(
                                (
                                    current_part.name,
                                    FileStorage(
                                        container,
                                        current_part.filename,
                                        current_part.name,
                                        headers=current_part.headers,
                                    ),
                                )
                            )

                event = parser.next_event()

        return self.cls(fields), self.cls(files)
+
+
+def _chunk_iter(read: t.Callable[[int], bytes], size: int) -> t.Iterator[bytes | None]:
+ """Read data in chunks for multipart/form-data parsing. Stop if no data is read.
+ Yield ``None`` at the end to signal end of parsing.
+ """
+ while True:
+ data = read(size)
+
+ if not data:
+ break
+
+ yield data
+
+ yield None
diff --git a/venv/lib/python3.8/site-packages/werkzeug/http.py b/venv/lib/python3.8/site-packages/werkzeug/http.py
new file mode 100644
index 0000000..cb8bc25
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/http.py
@@ -0,0 +1,1391 @@
+from __future__ import annotations
+
+import email.utils
+import re
+import typing as t
+import warnings
+from datetime import date
+from datetime import datetime
+from datetime import time
+from datetime import timedelta
+from datetime import timezone
+from enum import Enum
+from hashlib import sha1
+from time import mktime
+from time import struct_time
+from urllib.parse import quote
+from urllib.parse import unquote
+from urllib.request import parse_http_list as _parse_list_header
+
+from ._internal import _dt_as_utc
+from ._internal import _plain_int
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import WSGIEnvironment
+
# Characters allowed in an HTTP token; used to decide whether a header
# value needs quoting (see quote_header_value).
_token_chars = frozenset(
    "!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~"
)
# Matches one entry of a comma separated ETag list: an optional weak
# marker ("W/"), then a quoted or bare tag.
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
# Lowercased names of entity (representation) header fields.
_entity_headers = frozenset(
    [
        "allow",
        "content-encoding",
        "content-language",
        "content-length",
        "content-location",
        "content-md5",
        "content-range",
        "content-type",
        "expires",
        "last-modified",
    ]
)
# Lowercased names of hop-by-hop header fields, which apply to a single
# connection rather than the end-to-end message.
_hop_by_hop_headers = frozenset(
    [
        "connection",
        "keep-alive",
        "proxy-authenticate",
        "proxy-authorization",
        "te",
        "trailer",
        "transfer-encoding",
        "upgrade",
    ]
)
# Mapping of HTTP status code to its standard reason phrase.
HTTP_STATUS_CODES = {
    100: "Continue",
    101: "Switching Protocols",
    102: "Processing",
    103: "Early Hints",  # see RFC 8297
    200: "OK",
    201: "Created",
    202: "Accepted",
    203: "Non Authoritative Information",
    204: "No Content",
    205: "Reset Content",
    206: "Partial Content",
    207: "Multi Status",
    208: "Already Reported",  # see RFC 5842
    226: "IM Used",  # see RFC 3229
    300: "Multiple Choices",
    301: "Moved Permanently",
    302: "Found",
    303: "See Other",
    304: "Not Modified",
    305: "Use Proxy",
    306: "Switch Proxy",  # unused
    307: "Temporary Redirect",
    308: "Permanent Redirect",
    400: "Bad Request",
    401: "Unauthorized",
    402: "Payment Required",  # unused
    403: "Forbidden",
    404: "Not Found",
    405: "Method Not Allowed",
    406: "Not Acceptable",
    407: "Proxy Authentication Required",
    408: "Request Timeout",
    409: "Conflict",
    410: "Gone",
    411: "Length Required",
    412: "Precondition Failed",
    413: "Request Entity Too Large",
    414: "Request URI Too Long",
    415: "Unsupported Media Type",
    416: "Requested Range Not Satisfiable",
    417: "Expectation Failed",
    418: "I'm a teapot",  # see RFC 2324
    421: "Misdirected Request",  # see RFC 7540
    422: "Unprocessable Entity",
    423: "Locked",
    424: "Failed Dependency",
    425: "Too Early",  # see RFC 8470
    426: "Upgrade Required",
    428: "Precondition Required",  # see RFC 6585
    429: "Too Many Requests",
    431: "Request Header Fields Too Large",
    449: "Retry With",  # proprietary MS extension
    451: "Unavailable For Legal Reasons",
    500: "Internal Server Error",
    501: "Not Implemented",
    502: "Bad Gateway",
    503: "Service Unavailable",
    504: "Gateway Timeout",
    505: "HTTP Version Not Supported",
    506: "Variant Also Negotiates",  # see RFC 2295
    507: "Insufficient Storage",
    508: "Loop Detected",  # see RFC 5842
    510: "Not Extended",
    511: "Network Authentication Failed",
}
+
+
class COEP(Enum):
    """Cross Origin Embedder Policies"""

    # Known values for the Cross-Origin-Embedder-Policy header.
    UNSAFE_NONE = "unsafe-none"
    REQUIRE_CORP = "require-corp"
+
+
class COOP(Enum):
    """Cross Origin Opener Policies"""

    # Known values for the Cross-Origin-Opener-Policy header.
    UNSAFE_NONE = "unsafe-none"
    SAME_ORIGIN_ALLOW_POPUPS = "same-origin-allow-popups"
    SAME_ORIGIN = "same-origin"
+
+
def quote_header_value(value: t.Any, allow_token: bool = True) -> str:
    """Wrap a header value in double quotes. A value consisting only of
    ASCII token characters is returned unchanged; embedded ``"`` and
    ``\\`` characters are escaped with an additional ``\\``.

    This is the reverse of :func:`unquote_header_value`.

    :param value: The value to quote. Will be converted to a string.
    :param allow_token: Disable to quote the value even if it only has token characters.

    .. versionchanged:: 3.0
        Passing bytes is not supported.

    .. versionchanged:: 3.0
        The ``extra_chars`` parameter is removed.

    .. versionchanged:: 2.3
        The value is quoted if it is the empty string.

    .. versionadded:: 0.5
    """
    text = str(value)

    # The empty string always needs quotes.
    if not text:
        return '""'

    # Characters allowed in an HTTP token (same set as the module-level
    # _token_chars constant).
    token_chars = frozenset(
        "!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~"
    )

    if allow_token and token_chars.issuperset(text):
        return text

    escaped = text.replace("\\", "\\\\").replace('"', '\\"')
    return f'"{escaped}"'
+
+
def unquote_header_value(value: str) -> str:
    """Strip surrounding double quotes from a header value and decode
    slash-escaped ``"`` and ``\\`` characters.

    This is the reverse of :func:`quote_header_value`.

    :param value: The header value to unquote.

    .. versionchanged:: 3.0
        The ``is_filename`` parameter is removed.
    """
    # Anything not wrapped in a pair of double quotes passes through.
    if len(value) < 2 or value[0] != '"' or value[-1] != '"':
        return value

    inner = value[1:-1]
    return inner.replace("\\\\", "\\").replace('\\"', '"')
+
+
def dump_options_header(header: str | None, options: t.Mapping[str, t.Any]) -> str:
    """Serialize a header value plus ``key=value`` parameters joined by
    semicolons ``;``, e.g. for the ``Content-Type`` header.

    .. code-block:: python

        dump_options_header("text/html", {"charset": "UTF-8"})
        'text/html; charset=UTF-8'

    This is the reverse of :func:`parse_options_header`.

    Values containing non-token characters are quoted; ``None`` values
    are skipped. A key ending with an asterisk ``*`` is assumed to
    already be in the RFC 2231 ``key*=UTF-8''value`` form and its value
    is not quoted further.

    :param header: The primary header value.
    :param options: Parameters to encode as ``key=value`` pairs.

    .. versionchanged:: 2.3
        Keys with ``None`` values are skipped rather than treated as a bare key.

    .. versionchanged:: 2.2.3
        If a key ends with ``*``, its value will not be quoted.
    """
    parts: list[str] = [] if header is None else [header]

    for key, value in options.items():
        if value is None:
            continue

        if key[-1] == "*":
            # Extended-parameter form; the value is already encoded.
            parts.append(f"{key}={value}")
        else:
            parts.append(f"{key}={quote_header_value(value)}")

    return "; ".join(parts)
+
+
def dump_header(iterable: dict[str, t.Any] | t.Iterable[t.Any]) -> str:
    """Serialize a list of items, or a dict of ``key=value`` pairs, into a
    comma ``,`` separated header value.

    This is the reverse of :func:`parse_list_header`, :func:`parse_dict_header`,
    and :func:`parse_set_header`.

    Values containing non-token characters are quoted. A ``None`` value
    outputs the key alone. A key ending with an asterisk ``*`` is assumed
    to already be in the RFC 2231 ``key*=UTF-8''value`` form and its value
    is not quoted further.

    .. code-block:: python

        dump_header(["foo", "bar baz"])
        'foo, "bar baz"'

        dump_header({"foo": "bar baz"})
        'foo="bar baz"'

    :param iterable: The items to create a header from.

    .. versionchanged:: 3.0
        The ``allow_token`` parameter is removed.

    .. versionchanged:: 2.2.3
        If a key ends with ``*``, its value will not be quoted.
    """
    if not isinstance(iterable, dict):
        return ", ".join(quote_header_value(item) for item in iterable)

    parts = []

    for key, value in iterable.items():
        if value is None:
            parts.append(key)
        elif key[-1] == "*":
            parts.append(f"{key}={value}")
        else:
            parts.append(f"{key}={quote_header_value(value)}")

    return ", ".join(parts)
+
+
def dump_csp_header(header: ds.ContentSecurityPolicy) -> str:
    """Serialize a Content Security Policy header from its mapping of
    directives, producing policies such as
    ``default-src 'self'; script-src 'self'``.

    .. versionadded:: 1.0.0
        Support for Content Security Policy headers was added.

    """
    return "; ".join(f"{directive} {value}" for directive, value in header.items())
+
+
def parse_list_header(value: str) -> list[str]:
    """Parse a comma separated header value according to
    `RFC 9110 <https://httpwg.org/specs/rfc9110.html#abnf.extension>`__,
    removing surrounding double quotes from each item (extending
    :func:`urllib.request.parse_http_list`).

    .. code-block:: python

        parse_list_header('token, "quoted value"')
        ['token', 'quoted value']

    This is the reverse of :func:`dump_header`.

    :param value: The header value to parse.
    """
    items = []

    for raw in _parse_list_header(value):
        if len(raw) >= 2 and raw.startswith('"') and raw.endswith('"'):
            raw = raw[1:-1]

        items.append(raw)

    return items
+
+
def parse_dict_header(value: str) -> dict[str, str | None]:
    """Parse a list header using :func:`parse_list_header`, then parse each item as a
    ``key=value`` pair.

    .. code-block:: python

        parse_dict_header('a=b, c="d, e", f')
        {"a": "b", "c": "d, e", "f": None}

    This is the reverse of :func:`dump_header`.

    If a key does not have a value, it is ``None``.

    This handles charsets for values as described in
    `RFC 2231 <https://www.rfc-editor.org/rfc/rfc2231#section-3>`__. Only ASCII, UTF-8,
    and ISO-8859-1 charsets are accepted, otherwise the value remains quoted.

    :param value: The header value to parse.

    .. versionchanged:: 3.0
        Passing bytes is not supported.

    .. versionchanged:: 3.0
        The ``cls`` argument is removed.

    .. versionchanged:: 2.3
        Added support for ``key*=charset''value`` encoded items.

    .. versionchanged:: 0.9
        The ``cls`` argument was added.
    """
    result: dict[str, str | None] = {}

    for item in parse_list_header(value):
        # NOTE: ``value`` (the parameter) is deliberately reused as the loop's
        # pair value here; the original header string is no longer needed.
        key, has_value, value = item.partition("=")
        key = key.strip()

        if not key:
            # =value is not valid
            continue

        if not has_value:
            # A bare token such as ``f`` in ``a=b, f`` maps to None.
            result[key] = None
            continue

        value = value.strip()
        encoding: str | None = None

        if key[-1] == "*":
            # key*=charset''value becomes key=value, where value is percent encoded
            # adapted from parse_options_header, without the continuation handling
            key = key[:-1]
            match = _charset_value_re.match(value)

            if match:
                # If there is a charset marker in the value, split it off.
                encoding, value = match.groups()
                encoding = encoding.lower()

            # A safe list of encodings. Modern clients should only send ASCII or UTF-8.
            # This list will not be extended further. An invalid encoding will leave the
            # value quoted.
            if encoding in {"ascii", "us-ascii", "utf-8", "iso-8859-1"}:
                # invalid bytes are replaced during unquoting
                value = unquote(value, encoding=encoding)

        # Remove one level of surrounding double quotes from plain values.
        if len(value) >= 2 and value[0] == value[-1] == '"':
            value = value[1:-1]

        result[key] = value

    return result
+
+
# https://httpwg.org/specs/rfc9110.html#parameter
# A parameter key: an RFC 9110 "token" immediately followed by "=".
_parameter_key_re = re.compile(r"([\w!#$%&'*+\-.^`|~]+)=", flags=re.ASCII)
# An unquoted parameter value made up only of token characters.
_parameter_token_value_re = re.compile(r"[\w!#$%&'*+\-.^`|~]+", flags=re.ASCII)
# https://www.rfc-editor.org/rfc/rfc2231#section-4
# Matches the ``charset'language'percent-encoded-value`` form; group 1 is the
# charset (may be empty), group 2 is the still-encoded value.
_charset_value_re = re.compile(
    r"""
    ([\w!#$%&*+\-.^`|~]*)'  # charset part, could be empty
    [\w!#$%&*+\-.^`|~]*'  # don't care about language part, usually empty
    ([\w!#$%&'*+\-.^`|~]+)  # one or more token chars with percent encoding
    """,
    re.ASCII | re.VERBOSE,
)
# https://www.rfc-editor.org/rfc/rfc2231#section-3
# A continuation suffix at the end of a key, e.g. "*0" in ``key*0=a``.
_continuation_re = re.compile(r"\*(\d+)$", re.ASCII)
+
+
def parse_options_header(value: str | None) -> tuple[str, dict[str, str]]:
    """Parse a header that consists of a value with ``key=value`` parameters separated
    by semicolons ``;``. For example, the ``Content-Type`` header.

    .. code-block:: python

        parse_options_header("text/html; charset=UTF-8")
        ('text/html', {'charset': 'UTF-8'})

        parse_options_header("")
        ("", {})

    This is the reverse of :func:`dump_options_header`.

    This parses valid parameter parts as described in
    `RFC 9110 <https://httpwg.org/specs/rfc9110.html#parameter>`__. Invalid parts are
    skipped.

    This handles continuations and charsets as described in
    `RFC 2231 <https://www.rfc-editor.org/rfc/rfc2231#section-3>`__, although not as
    strictly as the RFC. Only ASCII, UTF-8, and ISO-8859-1 charsets are accepted,
    otherwise the value remains quoted.

    Clients may not be consistent in how they handle a quote character within a quoted
    value. The `HTML Standard <https://html.spec.whatwg.org/#multipart-form-data>`__
    replaces it with ``%22`` in multipart form data.
    `RFC 9110 <https://httpwg.org/specs/rfc9110.html#quoted.strings>`__ uses backslash
    escapes in HTTP headers. Both are decoded to the ``"`` character.

    Clients may not be consistent in how they handle non-ASCII characters. HTML
    documents must declare ``<meta charset=UTF-8>``, otherwise browsers may replace with
    HTML character references, which can be decoded using :func:`html.unescape`.

    :param value: The header value to parse.
    :return: ``(value, options)``, where ``options`` is a dict

    .. versionchanged:: 2.3
        Invalid parts, such as keys with no value, quoted keys, and incorrectly quoted
        values, are discarded instead of treating as ``None``.

    .. versionchanged:: 2.3
        Only ASCII, UTF-8, and ISO-8859-1 are accepted for charset values.

    .. versionchanged:: 2.3
        Escaped quotes in quoted values, like ``%22`` and ``\\"``, are handled.

    .. versionchanged:: 2.2
        Option names are always converted to lowercase.

    .. versionchanged:: 2.2
        The ``multiple`` parameter was removed.

    .. versionchanged:: 0.15
        :rfc:`2231` parameter continuations are handled.

    .. versionadded:: 0.5
    """
    if value is None:
        return "", {}

    # The primary value is everything before the first ";"; the remainder
    # holds the parameter list.
    value, _, rest = value.partition(";")
    value = value.strip()
    rest = rest.strip()

    if not value or not rest:
        # empty (invalid) value, or value without options
        return value, {}

    # Collect all valid key=value parts without processing the value.
    parts: list[tuple[str, str]] = []

    # First pass: lexical scan of the parameter list. Each iteration consumes
    # one "key=value" section; anything that doesn't match is skipped at the
    # next ";".
    while True:
        if (m := _parameter_key_re.match(rest)) is not None:
            pk = m.group(1).lower()
            rest = rest[m.end() :]

            # Value may be a token.
            if (m := _parameter_token_value_re.match(rest)) is not None:
                parts.append((pk, m.group()))

            # Value may be a quoted string, find the closing quote.
            elif rest[:1] == '"':
                pos = 1
                length = len(rest)

                while pos < length:
                    if rest[pos : pos + 2] in {"\\\\", '\\"'}:
                        # Consume escaped slashes and quotes.
                        pos += 2
                    elif rest[pos] == '"':
                        # Stop at an unescaped quote.
                        parts.append((pk, rest[: pos + 1]))
                        rest = rest[pos + 1 :]
                        break
                    else:
                        # Consume any other character.
                        pos += 1

        # Find the next section delimited by `;`, if any.
        if (end := rest.find(";")) == -1:
            break

        rest = rest[end + 1 :].lstrip()

    options: dict[str, str] = {}
    encoding: str | None = None
    continued_encoding: str | None = None

    # For each collected part, process optional charset and continuation,
    # unquote quoted values.
    for pk, pv in parts:
        if pk[-1] == "*":
            # key*=charset''value becomes key=value, where value is percent encoded
            pk = pk[:-1]
            match = _charset_value_re.match(pv)

            if match:
                # If there is a valid charset marker in the value, split it off.
                encoding, pv = match.groups()
                # This might be the empty string, handled next.
                encoding = encoding.lower()

            # No charset marker, or marker with empty charset value.
            if not encoding:
                encoding = continued_encoding

            # A safe list of encodings. Modern clients should only send ASCII or UTF-8.
            # This list will not be extended further. An invalid encoding will leave the
            # value quoted.
            if encoding in {"ascii", "us-ascii", "utf-8", "iso-8859-1"}:
                # Continuation parts don't require their own charset marker. This is
                # looser than the RFC, it will persist across different keys and allows
                # changing the charset during a continuation. But this implementation is
                # much simpler than tracking the full state.
                continued_encoding = encoding
                # invalid bytes are replaced during unquoting
                pv = unquote(pv, encoding=encoding)

        # Remove quotes. At this point the value cannot be empty or a single quote.
        if pv[0] == pv[-1] == '"':
            # HTTP headers use slash, multipart form data uses percent
            pv = pv[1:-1].replace("\\\\", "\\").replace('\\"', '"').replace("%22", '"')

        match = _continuation_re.search(pk)

        if match:
            # key*0=a; key*1=b becomes key=ab
            pk = pk[: match.start()]
            options[pk] = options.get(pk, "") + pv
        else:
            options[pk] = pv

    return value, options
+
+
_q_value_re = re.compile(r"-?\d+(\.\d+)?", re.ASCII)
_TAnyAccept = t.TypeVar("_TAnyAccept", bound="ds.Accept")


@t.overload
def parse_accept_header(value: str | None) -> ds.Accept: ...


@t.overload
def parse_accept_header(value: str | None, cls: type[_TAnyAccept]) -> _TAnyAccept: ...


def parse_accept_header(
    value: str | None, cls: type[_TAnyAccept] | None = None
) -> _TAnyAccept:
    """Parse an ``Accept`` header according to
    `RFC 9110 <https://httpwg.org/specs/rfc9110.html#field.accept>`__.

    Returns an :class:`.Accept` instance, which can sort and inspect items based on
    their quality parameter. When parsing ``Accept-Charset``, ``Accept-Encoding``, or
    ``Accept-Language``, pass the appropriate :class:`.Accept` subclass.

    :param value: The header value to parse.
    :param cls: The :class:`.Accept` class to wrap the result in.
    :return: An instance of ``cls``.

    .. versionchanged:: 2.3
        Parse according to RFC 9110. Items with invalid ``q`` values are skipped.
    """
    if cls is None:
        cls = t.cast(t.Type[_TAnyAccept], ds.Accept)

    if not value:
        return cls(None)

    items = []

    for part in parse_list_header(value):
        item, options = parse_options_header(part)
        q_str = options.pop("q", None)

        if q_str is None:
            # No quality parameter: full quality by default.
            q = 1
        else:
            q_str = q_str.strip()

            # Skip items whose quality value is not a plain decimal number.
            if _q_value_re.fullmatch(q_str) is None:
                continue

            q = float(q_str)

            # Skip items whose quality is outside the valid [0, 1] range.
            if q < 0 or q > 1:
                continue

        if options:
            # Re-attach any remaining (non-q) options to the media type.
            item = dump_options_header(item, options)

        items.append((item, q))

    return cls(items)
+
+
_TAnyCC = t.TypeVar("_TAnyCC", bound="ds.cache_control._CacheControl")


@t.overload
def parse_cache_control_header(
    value: str | None,
    on_update: t.Callable[[ds.cache_control._CacheControl], None] | None = None,
) -> ds.RequestCacheControl: ...


@t.overload
def parse_cache_control_header(
    value: str | None,
    on_update: t.Callable[[ds.cache_control._CacheControl], None] | None = None,
    cls: type[_TAnyCC] = ...,
) -> _TAnyCC: ...


def parse_cache_control_header(
    value: str | None,
    on_update: t.Callable[[ds.cache_control._CacheControl], None] | None = None,
    cls: type[_TAnyCC] | None = None,
) -> _TAnyCC:
    """Parse a ``Cache-Control`` header into a cache control object.

    The RFC differs between response and request cache control; this function
    does not, so it is the caller's responsibility to not use the wrong
    control statements.

    .. versionadded:: 0.5
       The `cls` was added. If not specified an immutable
       :class:`~werkzeug.datastructures.RequestCacheControl` is returned.

    :param value: a cache control header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.CacheControl`
                      object is changed.
    :param cls: the class for the returned object. By default
                :class:`~werkzeug.datastructures.RequestCacheControl` is used.
    :return: a `cls` object.
    """
    if cls is None:
        # Immutable, request-style cache control by default.
        cls = t.cast("type[_TAnyCC]", ds.RequestCacheControl)

    if value:
        return cls(parse_dict_header(value), on_update)

    # Missing or empty header: return an empty control object.
    return cls((), on_update)
+
+
_TAnyCSP = t.TypeVar("_TAnyCSP", bound="ds.ContentSecurityPolicy")


@t.overload
def parse_csp_header(
    value: str | None,
    on_update: t.Callable[[ds.ContentSecurityPolicy], None] | None = None,
) -> ds.ContentSecurityPolicy: ...


@t.overload
def parse_csp_header(
    value: str | None,
    on_update: t.Callable[[ds.ContentSecurityPolicy], None] | None = None,
    cls: type[_TAnyCSP] = ...,
) -> _TAnyCSP: ...


def parse_csp_header(
    value: str | None,
    on_update: t.Callable[[ds.ContentSecurityPolicy], None] | None = None,
    cls: type[_TAnyCSP] | None = None,
) -> _TAnyCSP:
    """Parse a Content Security Policy header.

    .. versionadded:: 1.0.0
       Support for Content Security Policy headers was added.

    :param value: a csp header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the object is changed.
    :param cls: the class for the returned object. By default
                :class:`~werkzeug.datastructures.ContentSecurityPolicy` is used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = t.cast("type[_TAnyCSP]", ds.ContentSecurityPolicy)

    if value is None:
        return cls((), on_update)

    directives = []

    for policy in value.split(";"):
        policy = policy.strip()
        directive, sep, sources = policy.partition(" ")

        # A policy without a space has no source list and is badly
        # formatted; ignore it.
        if sep:
            directives.append((directive.strip(), sources.strip()))

    return cls(directives, on_update)
+
+
def parse_set_header(
    value: str | None,
    on_update: t.Callable[[ds.HeaderSet], None] | None = None,
) -> ds.HeaderSet:
    """Parse a set-like header and return a
    :class:`~werkzeug.datastructures.HeaderSet` object:

    >>> hs = parse_set_header('token, "quoted value"')

    The return value is an object that treats the items case-insensitively
    and keeps the order of the items:

    >>> 'TOKEN' in hs
    True
    >>> hs.index('quoted value')
    1
    >>> hs
    HeaderSet(['token', 'quoted value'])

    To create a header from the :class:`HeaderSet` again, use the
    :func:`dump_header` function.

    :param value: a set header to be parsed.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`~werkzeug.datastructures.HeaderSet`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.HeaderSet`
    """
    if value:
        return ds.HeaderSet(parse_list_header(value), on_update)

    # Missing or empty header value: start with an empty set.
    return ds.HeaderSet(None, on_update)
+
+
def parse_if_range_header(value: str | None) -> ds.IfRange:
    """Parses an if-range header which can be an etag or a date. Returns
    a :class:`~werkzeug.datastructures.IfRange` object.

    .. versionchanged:: 2.0
        If the value represents a datetime, it is timezone-aware.

    .. versionadded:: 0.7
    """
    if not value:
        return ds.IfRange()

    timestamp = parse_date(value)

    if timestamp is not None:
        return ds.IfRange(date=timestamp)

    # Not a date; treat the value as an etag. Weakness information is dropped.
    return ds.IfRange(unquote_etag(value)[0])
+
+
def parse_range_header(
    value: str | None, make_inclusive: bool = True
) -> ds.Range | None:
    """Parses a range header into a :class:`~werkzeug.datastructures.Range`
    object. If the header is missing or malformed `None` is returned.
    `ranges` is a list of ``(start, stop)`` tuples where the ranges are
    non-inclusive.

    .. versionadded:: 0.7
    """
    # NOTE(review): ``make_inclusive`` is not used anywhere in this body;
    # presumably kept for backwards compatibility — confirm before removing.
    if not value or "=" not in value:
        return None

    ranges = []
    # last_end tracks the previous range's exclusive end; -1 marks an
    # open-ended or suffix range, after which no further range may follow.
    last_end = 0
    units, rng = value.split("=", 1)
    units = units.strip().lower()

    for item in rng.split(","):
        item = item.strip()
        if "-" not in item:
            return None
        if item.startswith("-"):
            # Suffix range like "-500": ``begin`` stays negative and
            # ``end`` is None.
            if last_end < 0:
                return None
            try:
                begin = _plain_int(item)
            except ValueError:
                return None
            end = None
            last_end = -1
        elif "-" in item:
            # NOTE: this condition is always true here, since items without
            # "-" were rejected above; the branch is effectively ``else``.
            begin_str, end_str = item.split("-", 1)
            begin_str = begin_str.strip()
            end_str = end_str.strip()

            try:
                begin = _plain_int(begin_str)
            except ValueError:
                return None

            # Ranges must be ascending and nothing may follow a suffix or
            # open-ended range.
            if begin < last_end or last_end < 0:
                return None
            if end_str:
                try:
                    # Header uses an inclusive last position; store exclusive.
                    end = _plain_int(end_str) + 1
                except ValueError:
                    return None

                if begin >= end:
                    return None
            else:
                # Open-ended range like "500-".
                end = None
            last_end = end if end is not None else -1
        ranges.append((begin, end))

    return ds.Range(units, ranges)
+
+
def parse_content_range_header(
    value: str | None,
    on_update: t.Callable[[ds.ContentRange], None] | None = None,
) -> ds.ContentRange | None:
    """Parses a range header into a
    :class:`~werkzeug.datastructures.ContentRange` object or `None` if
    parsing is not possible.

    .. versionadded:: 0.7

    :param value: a content range header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.ContentRange`
                      object is changed.
    """
    if value is None:
        return None
    try:
        # Header shape: "<units> <range>/<length>".
        units, rangedef = (value or "").strip().split(None, 1)
    except ValueError:
        return None

    if "/" not in rangedef:
        return None
    rng, length_str = rangedef.split("/", 1)
    if length_str == "*":
        # "*" means the total length is unknown.
        length = None
    else:
        try:
            length = _plain_int(length_str)
        except ValueError:
            return None

    if rng == "*":
        # "*" range means the range is unsatisfied; only the length applies.
        if not is_byte_range_valid(None, None, length):
            return None

        return ds.ContentRange(units, None, None, length, on_update=on_update)
    elif "-" not in rng:
        return None

    start_str, stop_str = rng.split("-", 1)
    try:
        start = _plain_int(start_str)
        # Header uses an inclusive last byte position; stored exclusive.
        stop = _plain_int(stop_str) + 1
    except ValueError:
        return None

    if is_byte_range_valid(start, stop, length):
        return ds.ContentRange(units, start, stop, length, on_update=on_update)

    return None
+
+
def quote_etag(etag: str, weak: bool = False) -> str:
    """Quote an etag.

    :param etag: the etag to quote.
    :param weak: set to `True` to tag it "weak".
    :raises ValueError: if the etag already contains a double quote.
    """
    if '"' in etag:
        raise ValueError("invalid etag")

    quoted = f'"{etag}"'
    return f"W/{quoted}" if weak else quoted
+
+
def unquote_etag(
    etag: str | None,
) -> tuple[str, bool] | tuple[None, None]:
    """Unquote a single etag:

    >>> unquote_etag('W/"bar"')
    ('bar', True)
    >>> unquote_etag('"bar"')
    ('bar', False)

    :param etag: the etag identifier to unquote.
    :return: a ``(etag, weak)`` tuple.
    """
    if not etag:
        return None, None

    tag = etag.strip()
    # A "W/" (or lowercase "w/") prefix marks a weak etag.
    weak = tag.startswith(("W/", "w/"))

    if weak:
        tag = tag[2:]

    # Strip one level of surrounding double quotes, if present.
    if tag[:1] == tag[-1:] == '"':
        tag = tag[1:-1]

    return tag, weak
+
+
def parse_etags(value: str | None) -> ds.ETags:
    """Parse an etag header.

    :param value: the tag header to parse
    :return: an :class:`~werkzeug.datastructures.ETags` object.
    """
    if not value:
        return ds.ETags()
    strong = []
    weak = []
    end = len(value)
    pos = 0
    # Scan the header with ``_etag_re`` (defined elsewhere in this module);
    # from the usage below its groups appear to be (weak marker, quoted tag,
    # raw tag) — TODO confirm against the pattern definition.
    while pos < end:
        match = _etag_re.match(value, pos)
        if match is None:
            # Stop at the first unparseable position; tags found so far are kept.
            break
        is_weak, quoted, raw = match.groups()
        if raw == "*":
            # "*" matches anything; no individual tags are needed.
            return ds.ETags(star_tag=True)
        elif quoted:
            raw = quoted
        if is_weak:
            weak.append(raw)
        else:
            strong.append(raw)
        pos = match.end()
    return ds.ETags(strong, weak)
+
+
def generate_etag(data: bytes) -> str:
    """Generate an etag for some data.

    :param data: the bytes to hash.
    :return: the hex digest of the SHA-1 hash of ``data``.

    .. versionchanged:: 2.0
        Use SHA-1. MD5 may not be available in some environments.
    """
    hasher = sha1()
    hasher.update(data)
    return hasher.hexdigest()
+
+
def parse_date(value: str | None) -> datetime | None:
    """Parse an :rfc:`2822` date into a timezone-aware
    :class:`datetime.datetime` object, or ``None`` if parsing fails.

    This is a wrapper for :func:`email.utils.parsedate_to_datetime`. It
    returns ``None`` if parsing fails instead of raising an exception,
    and always returns a timezone-aware datetime object. If the string
    doesn't have timezone information, it is assumed to be UTC.

    :param value: A string with a supported date format.

    .. versionchanged:: 2.0
        Return a timezone-aware datetime object. Use
        ``email.utils.parsedate_to_datetime``.
    """
    if value is None:
        return None

    try:
        parsed = email.utils.parsedate_to_datetime(value)
    except (TypeError, ValueError):
        # Unparseable input yields None rather than an exception.
        return None

    if parsed.tzinfo is not None:
        return parsed

    # Naive datetimes are assumed to be in UTC.
    return parsed.replace(tzinfo=timezone.utc)
+
+
def http_date(
    timestamp: datetime | date | int | float | struct_time | None = None,
) -> str:
    """Format a datetime object or timestamp into an :rfc:`2822` date
    string.

    This is a wrapper for :func:`email.utils.format_datetime`. It
    assumes naive datetime objects are in UTC instead of raising an
    exception.

    :param timestamp: The datetime or timestamp to format. Defaults to
        the current time.

    .. versionchanged:: 2.0
        Use ``email.utils.format_datetime``. Accept ``date`` objects.
    """
    if isinstance(timestamp, struct_time):
        # Convert a struct_time to a Unix timestamp first.
        timestamp = mktime(timestamp)

    # ``datetime`` is checked before ``date`` because it is a subclass.
    if isinstance(timestamp, datetime):
        # Ensure the datetime is timezone-aware before formatting.
        return email.utils.format_datetime(_dt_as_utc(timestamp), usegmt=True)

    if isinstance(timestamp, date):
        # A plain date is taken to mean midnight UTC on that day.
        midnight = datetime.combine(timestamp, time(), tzinfo=timezone.utc)
        return email.utils.format_datetime(midnight, usegmt=True)

    # int/float timestamp, or None for the current time.
    return email.utils.formatdate(timestamp, usegmt=True)
+
+
def parse_age(value: str | None = None) -> timedelta | None:
    """Parses a base-10 integer count of seconds into a timedelta.

    If parsing fails, the return value is `None`.

    :param value: a string consisting of an integer represented in base-10
    :return: a :class:`datetime.timedelta` object or `None`.
    """
    if not value:
        return None

    try:
        seconds = int(value)
        # Negative ages are invalid; huge values overflow timedelta.
        if seconds >= 0:
            return timedelta(seconds=seconds)
    except (ValueError, OverflowError):
        pass

    return None
+
+
def dump_age(age: timedelta | int | None = None) -> str | None:
    """Formats the duration as a base-10 integer.

    :param age: should be an integer number of seconds,
                a :class:`datetime.timedelta` object, or,
                if the age is unknown, `None` (default).
    :raises ValueError: if the resulting number of seconds is negative.
    """
    if age is None:
        return None

    seconds = int(age.total_seconds()) if isinstance(age, timedelta) else int(age)

    if seconds < 0:
        raise ValueError("age cannot be negative")

    return str(seconds)
+
+
def is_resource_modified(
    environ: WSGIEnvironment,
    etag: str | None = None,
    data: bytes | None = None,
    last_modified: datetime | str | None = None,
    ignore_if_range: bool = True,
) -> bool:
    """Convenience method for conditional requests.

    :param environ: the WSGI environment of the request to be checked.
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
                 generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :param ignore_if_range: If `False`, `If-Range` header will be taken into
                            account.
    :return: `True` if the resource was modified, otherwise `False`.

    .. versionchanged:: 2.0
        SHA-1 is used to generate an etag value for the data. MD5 may
        not be available in some environments.

    .. versionchanged:: 1.0.0
        The check is run for methods other than ``GET`` and ``HEAD``.
    """
    # This function only extracts the conditional headers from the WSGI
    # environ; the actual comparison logic lives in the shared sansio
    # implementation.
    return _sansio_http.is_resource_modified(
        http_range=environ.get("HTTP_RANGE"),
        http_if_range=environ.get("HTTP_IF_RANGE"),
        http_if_modified_since=environ.get("HTTP_IF_MODIFIED_SINCE"),
        http_if_none_match=environ.get("HTTP_IF_NONE_MATCH"),
        http_if_match=environ.get("HTTP_IF_MATCH"),
        etag=etag,
        data=data,
        last_modified=last_modified,
        ignore_if_range=ignore_if_range,
    )
+
+
def remove_entity_headers(
    headers: ds.Headers | list[tuple[str, str]],
    allowed: t.Iterable[str] = ("expires", "content-location"),
) -> None:
    """Remove all entity headers from a list or :class:`Headers` object. This
    operation works in-place. `Expires` and `Content-Location` headers are
    by default not removed. The reason for this is :rfc:`2616` section
    10.3.5 which specifies some entity headers that should be sent.

    .. versionchanged:: 0.5
       added `allowed` parameter.

    :param headers: a list or :class:`Headers` object.
    :param allowed: a list of headers that should still be allowed even though
                    they are entity headers.
    """
    keep = {name.lower() for name in allowed}
    headers[:] = [
        (key, value)
        for key, value in headers
        if key.lower() in keep or not is_entity_header(key)
    ]
+
+
def remove_hop_by_hop_headers(headers: ds.Headers | list[tuple[str, str]]) -> None:
    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
    :class:`Headers` object. This operation works in-place.

    .. versionadded:: 0.5

    :param headers: a list or :class:`Headers` object.
    """
    kept = [(key, value) for key, value in headers if not is_hop_by_hop_header(key)]
    headers[:] = kept
+
+
def is_entity_header(header: str) -> bool:
    """Check if a header is an entity header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's an entity header, `False` otherwise.
    """
    # Case-insensitive membership test against the module-level
    # ``_entity_headers`` set (defined elsewhere in this module).
    return header.lower() in _entity_headers
+
+
def is_hop_by_hop_header(header: str) -> bool:
    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise.
    """
    # Case-insensitive membership test against the module-level
    # ``_hop_by_hop_headers`` set (defined elsewhere in this module).
    return header.lower() in _hop_by_hop_headers
+
+
def parse_cookie(
    header: WSGIEnvironment | str | None,
    cls: type[ds.MultiDict[str, str]] | None = None,
) -> ds.MultiDict[str, str]:
    """Parse a cookie from a string or WSGI environ.

    The same key can be provided multiple times, the values are stored
    in-order. The default :class:`MultiDict` will have the first value
    first, and all values can be retrieved with
    :meth:`MultiDict.getlist`.

    :param header: The cookie header as a string, or a WSGI environ dict
        with a ``HTTP_COOKIE`` key.
    :param cls: A dict-like class to store the parsed cookies in.
        Defaults to :class:`MultiDict`.

    .. versionchanged:: 3.0
        Passing bytes, and the ``charset`` and ``errors`` parameters, were removed.

    .. versionchanged:: 1.0
        Returns a :class:`MultiDict` instead of a ``TypeConversionDict``.

    .. versionchanged:: 0.5
        Returns a :class:`TypeConversionDict` instead of a regular dict. The ``cls``
        parameter was added.
    """
    if isinstance(header, dict):
        cookie = header.get("HTTP_COOKIE")
    else:
        cookie = header

    if cookie:
        # PEP 3333 tunnels header bytes as latin-1 strings; re-encode and
        # decode with the default (UTF-8) codec to recover non-ASCII data.
        cookie = cookie.encode("latin1").decode()

    # Actual parsing is shared with the async side via the sansio module.
    return _sansio_http.parse_cookie(cookie=cookie, cls=cls)
+
+
# Characters that may appear in a cookie value without any quoting.
_cookie_no_quote_re = re.compile(r"[\w!#$%&'()*+\-./:<=>?@\[\]^`{|}~]*", re.A)
# Bytes in a cookie value that must be slash-escaped when quoting.
_cookie_slash_re = re.compile(rb"[\x00-\x19\",;\\\x7f-\xff]", re.A)
# Maps each escaped byte to its replacement; quote and backslash get a
# two-character escape, everything else a three-digit octal escape.
_cookie_slash_map = {b'"': b'\\"', b"\\": b"\\\\"}
_cookie_slash_map.update(
    # Control chars, comma, semicolon, and non-ASCII bytes become \ooo.
    (v.to_bytes(1, "big"), b"\\%03o" % v)
    for v in [*range(0x20), *b",;", *range(0x7F, 256)]
)
+
+
def dump_cookie(
    key: str,
    value: str = "",
    max_age: timedelta | int | None = None,
    expires: str | datetime | int | float | None = None,
    path: str | None = "/",
    domain: str | None = None,
    secure: bool = False,
    httponly: bool = False,
    sync_expires: bool = True,
    max_size: int = 4093,
    samesite: str | None = None,
) -> str:
    """Create a Set-Cookie header without the ``Set-Cookie`` prefix.

    The return value is usually restricted to ascii as the vast majority
    of values are properly escaped, but that is no guarantee. It's
    tunneled through latin1 as required by :pep:`3333`.

    The return value is not ASCII safe if the key contains unicode
    characters. This is technically against the specification but
    happens in the wild. It's strongly recommended to not use
    non-ASCII values for the keys.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session. Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain="example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie. This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    :param max_size: Warn if the final header value exceeds this size. The
        default, 4093, should be safely `supported by most browsers
        <cookie_>`_. Set to 0 to disable this check.
    :param samesite: Limits the scope of the cookie such that it will
        only be attached to requests if those requests are same-site.

    .. _`cookie`: http://browsercookielimits.squawky.net/

    .. versionchanged:: 3.0
        Passing bytes, and the ``charset`` parameter, were removed.

    .. versionchanged:: 2.3.3
        The ``path`` parameter is ``/`` by default.

    .. versionchanged:: 2.3.1
        The value allows more characters without quoting.

    .. versionchanged:: 2.3
        ``localhost`` and other names without a dot are allowed for the domain. A
        leading dot is ignored.

    .. versionchanged:: 2.3
        The ``path`` parameter is ``None`` by default.

    .. versionchanged:: 1.0.0
        The string ``'None'`` is accepted for ``samesite``.
    """
    if path is not None:
        # safe = https://url.spec.whatwg.org/#url-path-segment-string
        # as well as percent for things that are already quoted
        # excluding semicolon since it's part of the header syntax
        path = quote(path, safe="%!$&'()*+,/:=@")

    if domain:
        # Strip any port and a leading dot, then IDNA-encode the name.
        domain = domain.partition(":")[0].lstrip(".").encode("idna").decode("ascii")

    if isinstance(max_age, timedelta):
        max_age = int(max_age.total_seconds())

    if expires is not None:
        if not isinstance(expires, str):
            expires = http_date(expires)
    elif max_age is not None and sync_expires:
        # Derive an Expires value from Max-Age for old clients.
        expires = http_date(datetime.now(tz=timezone.utc).timestamp() + max_age)

    if samesite is not None:
        # Normalize casing, then validate against the allowed values.
        samesite = samesite.title()

        if samesite not in {"Strict", "Lax", "None"}:
            raise ValueError("SameSite must be 'Strict', 'Lax', or 'None'.")

    # Quote value if it contains characters not allowed by RFC 6265. Slash-escape with
    # three octal digits, which matches http.cookies, although the RFC suggests base64.
    if not _cookie_no_quote_re.fullmatch(value):
        # Work with bytes here, since a UTF-8 character could be multiple bytes.
        value = _cookie_slash_re.sub(
            lambda m: _cookie_slash_map[m.group()], value.encode()
        ).decode("ascii")
        value = f'"{value}"'

    # Send a non-ASCII key as mojibake. Everything else should already be ASCII.
    # TODO Remove encoding dance, it seems like clients accept UTF-8 keys
    buf = [f"{key.encode().decode('latin1')}={value}"]

    for k, v in (
        ("Domain", domain),
        ("Expires", expires),
        ("Max-Age", max_age),
        ("Secure", secure),
        ("HttpOnly", httponly),
        ("Path", path),
        ("SameSite", samesite),
    ):
        # None/False attributes are omitted; True attributes are flags
        # without a value; everything else is rendered as ``k=v``.
        if v is None or v is False:
            continue

        if v is True:
            buf.append(k)
            continue

        buf.append(f"{k}={v}")

    rv = "; ".join(buf)

    # Warn if the final value of the cookie is larger than the limit. If the cookie is
    # too large, then it may be silently ignored by the browser, which can be quite hard
    # to debug.
    cookie_size = len(rv)

    if max_size and cookie_size > max_size:
        value_size = len(value)
        warnings.warn(
            f"The '{key}' cookie is too large: the value was {value_size} bytes but the"
            f" header required {cookie_size - value_size} extra bytes. The final size"
            f" was {cookie_size} bytes but the limit is {max_size} bytes. Browsers may"
            " silently ignore cookies larger than this.",
            stacklevel=2,
        )

    return rv
+
+
def is_byte_range_valid(
    start: int | None, stop: int | None, length: int | None
) -> bool:
    """Checks if a given byte content range is valid for the given length.

    :param start: inclusive start position, or ``None`` for an unspecified range.
    :param stop: exclusive stop position, or ``None`` for an unspecified range.
    :param length: total length, or ``None`` if unknown.

    .. versionadded:: 0.7
    """
    if (start is None) != (stop is None):
        # Start and stop must be given together.
        return False

    if start is None:
        # An unspecified range is valid for any unknown or non-negative length.
        return length is None or length >= 0

    if stop is not None and start >= stop:
        # The range must be non-empty and ascending.
        return False

    if length is None:
        return 0 <= start < stop  # type: ignore[operator]

    return 0 <= start < length
+
+
+# circular dependencies
+from . import datastructures as ds
+from .sansio import http as _sansio_http
diff --git a/venv/lib/python3.8/site-packages/werkzeug/local.py b/venv/lib/python3.8/site-packages/werkzeug/local.py
new file mode 100644
index 0000000..302589b
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/local.py
@@ -0,0 +1,653 @@
+from __future__ import annotations
+
+import copy
+import math
+import operator
+import typing as t
+from contextvars import ContextVar
+from functools import partial
+from functools import update_wrapper
+from operator import attrgetter
+
+from .wsgi import ClosingIterator
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+T = t.TypeVar("T")
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
+
def release_local(local: Local | LocalStack[t.Any]) -> None:
    """Clear the current context's data in a :class:`Local` or
    :class:`LocalStack` without going through a :class:`LocalManager`.

    This should not be needed for modern use cases, and may be removed
    in the future.

    .. versionadded:: 0.6.1
    """
    # Both Local and LocalStack implement the __release_local__ protocol.
    local.__release_local__()
+
+
class Local:
    """A namespace whose attributes are context-local. The data lives in
    a single :class:`ContextVar` holding a :class:`dict`.

    Because the dict is copied on every write to avoid leaking mutations
    between nested contexts, this can be slower than using one context
    var per value.

    :param context_var: The :class:`~contextvars.ContextVar` to use as
        storage for this local. If not given, one will be created.
        Context vars not created at the global scope may interfere with
        garbage collection.

    .. versionchanged:: 2.0
        Uses ``ContextVar`` instead of a custom storage implementation.
    """

    __slots__ = ("__storage",)

    def __init__(self, context_var: ContextVar[dict[str, t.Any]] | None = None) -> None:
        if context_var is None:
            # Locals are normally defined as module-level globals, so
            # creating the backing ContextVar here is acceptable even
            # though non-global context vars can interfere with garbage
            # collection.
            context_var = ContextVar(f"werkzeug.Local<{id(self)}>.storage")

        # __setattr__ is overridden below, so bypass it during setup.
        # The slot name is written pre-mangled because of __slots__.
        object.__setattr__(self, "_Local__storage", context_var)

    def __iter__(self) -> t.Iterator[tuple[str, t.Any]]:
        """Iterate over ``(name, value)`` pairs in the current context."""
        return iter(self.__storage.get({}).items())

    def __call__(
        self, name: str, *, unbound_message: str | None = None
    ) -> LocalProxy[t.Any]:
        """Create a :class:`LocalProxy` that accesses an attribute on
        this local namespace.

        :param name: Proxy this attribute.
        :param unbound_message: The error message that the proxy will
            show if the attribute isn't set.
        """
        return LocalProxy(self, name, unbound_message=unbound_message)

    def __release_local__(self) -> None:
        # Reset the namespace for the current context.
        self.__storage.set({})

    def __getattr__(self, name: str) -> t.Any:
        current = self.__storage.get({})

        if name not in current:
            raise AttributeError(name)

        return current[name]

    def __setattr__(self, name: str, value: t.Any) -> None:
        # Copy-on-write keeps nested contexts from observing each
        # other's mutations.
        updated = dict(self.__storage.get({}))
        updated[name] = value
        self.__storage.set(updated)

    def __delattr__(self, name: str) -> None:
        current = self.__storage.get({})

        if name not in current:
            raise AttributeError(name)

        remaining = dict(current)
        del remaining[name]
        self.__storage.set(remaining)
+
+
+class LocalStack(t.Generic[T]):
+ """Create a stack of context-local data. This wraps a
+ :class:`ContextVar` containing a :class:`list` value.
+
+ This may incur a performance penalty compared to using individual
+ context vars, as it has to copy data to avoid mutating the list
+ between nested contexts.
+
+ :param context_var: The :class:`~contextvars.ContextVar` to use as
+ storage for this local. If not given, one will be created.
+ Context vars not created at the global scope may interfere with
+ garbage collection.
+
+ .. versionchanged:: 2.0
+ Uses ``ContextVar`` instead of a custom storage implementation.
+
+ .. versionadded:: 0.6.1
+ """
+
+ __slots__ = ("_storage",)
+
+ def __init__(self, context_var: ContextVar[list[T]] | None = None) -> None:
+ if context_var is None:
+ # A ContextVar not created at global scope interferes with
+ # Python's garbage collection. However, a local only makes
+ # sense defined at the global scope as well, in which case
+ # the GC issue doesn't seem relevant.
+ context_var = ContextVar(f"werkzeug.LocalStack<{id(self)}>.storage")
+
+ self._storage = context_var
+
+ def __release_local__(self) -> None:
+ self._storage.set([])
+
+ def push(self, obj: T) -> list[T]:
+ """Add a new item to the top of the stack."""
+ stack = self._storage.get([]).copy()
+ stack.append(obj)
+ self._storage.set(stack)
+ return stack
+
+ def pop(self) -> T | None:
+ """Remove the top item from the stack and return it. If the
+ stack is empty, return ``None``.
+ """
+ stack = self._storage.get([])
+
+ if len(stack) == 0:
+ return None
+
+ rv = stack[-1]
+ self._storage.set(stack[:-1])
+ return rv
+
+ @property
+ def top(self) -> T | None:
+ """The topmost item on the stack. If the stack is empty,
+ `None` is returned.
+ """
+ stack = self._storage.get([])
+
+ if len(stack) == 0:
+ return None
+
+ return stack[-1]
+
+ def __call__(
+ self, name: str | None = None, *, unbound_message: str | None = None
+ ) -> LocalProxy[t.Any]:
+ """Create a :class:`LocalProxy` that accesses the top of this
+ local stack.
+
+ :param name: If given, the proxy access this attribute of the
+ top item, rather than the item itself.
+ :param unbound_message: The error message that the proxy will
+ show if the stack is empty.
+ """
+ return LocalProxy(self, name, unbound_message=unbound_message)
+
+
class LocalManager:
    """Manage releasing the data for the current context in one or more
    :class:`Local` and :class:`LocalStack` objects.

    This should not be needed for modern use cases, and may be removed
    in the future.

    :param locals: A local or list of locals to manage.

    .. versionchanged:: 2.1
        The ``ident_func`` was removed.

    .. versionchanged:: 0.7
        The ``ident_func`` parameter was added.

    .. versionchanged:: 0.6.1
        The :func:`release_local` function can be used instead of a
        manager.
    """

    __slots__ = ("locals",)

    def __init__(
        self,
        locals: None
        | (Local | LocalStack[t.Any] | t.Iterable[Local | LocalStack[t.Any]]) = None,
    ) -> None:
        if locals is None:
            self.locals = []
        elif isinstance(locals, (Local, LocalStack)):
            # A single local. LocalStack must be matched here too: it is
            # not iterable, so a bare stack would otherwise crash in the
            # list() call below even though the signature documents it
            # as supported.
            self.locals = [locals]
        else:
            # An iterable of locals.
            self.locals = list(locals)

    def cleanup(self) -> None:
        """Release the data in the locals for this context. Call this at
        the end of each request or use :meth:`make_middleware`.
        """
        for local in self.locals:
            release_local(local)

    def make_middleware(self, app: WSGIApplication) -> WSGIApplication:
        """Wrap a WSGI application so that local data is released
        automatically after the response has been sent for a request.
        """

        def application(
            environ: WSGIEnvironment, start_response: StartResponse
        ) -> t.Iterable[bytes]:
            # ClosingIterator invokes self.cleanup when the response
            # iterable is closed, i.e. after the response was sent.
            return ClosingIterator(app(environ, start_response), self.cleanup)

        return application

    def middleware(self, func: WSGIApplication) -> WSGIApplication:
        """Like :meth:`make_middleware` but used as a decorator on the
        WSGI application function.

        .. code-block:: python

            @manager.middleware
            def application(environ, start_response):
                ...
        """
        return update_wrapper(self.make_middleware(func), func)

    def __repr__(self) -> str:
        return f"<{type(self).__name__} storages: {len(self.locals)}>"
+
+
class _ProxyLookup:
    """Descriptor that handles proxied attribute lookup for
    :class:`LocalProxy`.

    :param f: The built-in function this attribute is accessed through.
        Instead of looking up the special method, the function call
        is redone on the object.
    :param fallback: Return this function if the proxy is unbound
        instead of raising a :exc:`RuntimeError`.
    :param is_attr: This proxied name is an attribute, not a function.
        Call the fallback immediately to get the value.
    :param class_value: Value to return when accessed from the
        ``LocalProxy`` class directly. Used for ``__doc__`` so building
        docs still works.
    """

    # ``name`` is filled in by __set_name__ when the descriptor is
    # assigned in the LocalProxy class body.
    __slots__ = ("bind_f", "fallback", "is_attr", "class_value", "name")

    def __init__(
        self,
        f: t.Callable[..., t.Any] | None = None,
        fallback: t.Callable[[LocalProxy[t.Any]], t.Any] | None = None,
        class_value: t.Any | None = None,
        is_attr: bool = False,
    ) -> None:
        # bind_f produces the callable that __get__ returns for a bound
        # proxy; None means "fall through to getattr on the object".
        bind_f: t.Callable[[LocalProxy[t.Any], t.Any], t.Callable[..., t.Any]] | None

        if hasattr(f, "__get__"):
            # A Python function, can be turned into a bound method.

            def bind_f(
                instance: LocalProxy[t.Any], obj: t.Any
            ) -> t.Callable[..., t.Any]:
                return f.__get__(obj, type(obj))  # type: ignore

        elif f is not None:
            # A C function, use partial to bind the first argument.

            def bind_f(
                instance: LocalProxy[t.Any], obj: t.Any
            ) -> t.Callable[..., t.Any]:
                return partial(f, obj)

        else:
            # Use getattr, which will produce a bound method.
            bind_f = None

        self.bind_f = bind_f
        self.fallback = fallback
        self.class_value = class_value
        self.is_attr = is_attr

    def __set_name__(self, owner: LocalProxy[t.Any], name: str) -> None:
        # Record the attribute name this descriptor was assigned to, for
        # the getattr fall-through in __get__ and for __repr__.
        self.name = name

    def __get__(self, instance: LocalProxy[t.Any], owner: type | None = None) -> t.Any:
        if instance is None:
            # Accessed on the LocalProxy class itself, not an instance.
            if self.class_value is not None:
                return self.class_value

            return self

        try:
            obj = instance._get_current_object()
        except RuntimeError:
            # The proxy is unbound; use the fallback if one was given,
            # otherwise re-raise the RuntimeError.
            if self.fallback is None:
                raise

            fallback = self.fallback.__get__(instance, owner)

            if self.is_attr:
                # __class__ and __doc__ are attributes, not methods.
                # Call the fallback to get the value.
                return fallback()

            return fallback

        if self.bind_f is not None:
            return self.bind_f(instance, obj)

        return getattr(obj, self.name)

    def __repr__(self) -> str:
        return f"proxy {self.name}"

    def __call__(
        self, instance: LocalProxy[t.Any], *args: t.Any, **kwargs: t.Any
    ) -> t.Any:
        """Support calling unbound methods from the class. For example,
        this happens with ``copy.copy``, which does
        ``type(x).__copy__(x)``. ``type(x)`` can't be proxied, so it
        returns the proxy type and descriptor.
        """
        return self.__get__(instance, type(instance))(*args, **kwargs)
+
+
class _ProxyIOp(_ProxyLookup):
    """Lookup for augmented-assignment (``__iadd__`` etc.) methods on
    the proxied object. The real operator is wrapped so that the proxy
    itself, not the underlying object, is the result of the statement.
    """

    __slots__ = ()

    def __init__(
        self,
        f: t.Callable[..., t.Any] | None = None,
        fallback: t.Callable[[LocalProxy[t.Any]], t.Any] | None = None,
    ) -> None:
        super().__init__(f, fallback)

        def bind_f(instance: LocalProxy[t.Any], obj: t.Any) -> t.Callable[..., t.Any]:
            # Perform the in-place operation on the bound object, then
            # hand the proxy back so rebinding keeps pointing at it.
            def in_place(self: t.Any, other: t.Any) -> LocalProxy[t.Any]:
                f(self, other)  # type: ignore
                return instance

            return in_place.__get__(obj, type(obj))  # type: ignore

        self.bind_f = bind_f
+
+
+def _l_to_r_op(op: F) -> F:
+ """Swap the argument order to turn an l-op into an r-op."""
+
+ def r_op(obj: t.Any, other: t.Any) -> t.Any:
+ return op(other, obj)
+
+ return t.cast(F, r_op)
+
+
+def _identity(o: T) -> T:
+ return o
+
+
class LocalProxy(t.Generic[T]):
    """A proxy to the object bound to a context-local object. All
    operations on the proxy are forwarded to the bound object. If no
    object is bound, a ``RuntimeError`` is raised.

    :param local: The context-local object that provides the proxied
        object.
    :param name: Proxy this attribute from the proxied object.
    :param unbound_message: The error message to show if the
        context-local object is unbound.

    Proxy a :class:`~contextvars.ContextVar` to make it easier to
    access. Pass a name to proxy that attribute.

    .. code-block:: python

        _request_var = ContextVar("request")
        request = LocalProxy(_request_var)
        session = LocalProxy(_request_var, "session")

    Proxy an attribute on a :class:`Local` namespace by calling the
    local with the attribute name:

    .. code-block:: python

        data = Local()
        user = data("user")

    Proxy the top item on a :class:`LocalStack` by calling the local.
    Pass a name to proxy that attribute.

    .. code-block::

        app_stack = LocalStack()
        current_app = app_stack()
        g = app_stack("g")

    Pass a function to proxy the return value from that function. This
    was previously used to access attributes of local objects before
    that was supported directly.

    .. code-block:: python

        session = LocalProxy(lambda: request.session)

    ``__repr__`` and ``__class__`` are proxied, so ``repr(x)`` and
    ``isinstance(x, cls)`` will look like the proxied object. Use
    ``issubclass(type(x), LocalProxy)`` to check if an object is a
    proxy.

    .. code-block:: python

        repr(user)  # <User admin>
        isinstance(user, User)  # True
        issubclass(type(user), LocalProxy)  # True

    .. versionchanged:: 2.2.2
        ``__wrapped__`` is set when wrapping an object, not only when
        wrapping a function, to prevent doctest from failing.

    .. versionchanged:: 2.2
        Can proxy a ``ContextVar`` or ``LocalStack`` directly.

    .. versionchanged:: 2.2
        The ``name`` parameter can be used with any proxied object, not
        only ``Local``.

    .. versionchanged:: 2.2
        Added the ``unbound_message`` parameter.

    .. versionchanged:: 2.0
        Updated proxied attributes and methods to reflect the current
        data model.

    .. versionchanged:: 0.6.1
        The class can be instantiated with a callable.
    """

    __slots__ = ("__wrapped", "_get_current_object")

    _get_current_object: t.Callable[[], T]
    """Return the current object this proxy is bound to. If the proxy is
    unbound, this raises a ``RuntimeError``.

    This should be used if you need to pass the object to something that
    doesn't understand the proxy. It can also be useful for performance
    if you are accessing the object multiple times in a function, rather
    than going through the proxy multiple times.
    """

    def __init__(
        self,
        local: ContextVar[T] | Local | LocalStack[T] | t.Callable[[], T],
        name: str | None = None,
        *,
        unbound_message: str | None = None,
    ) -> None:
        # get_name resolves the optional attribute lookup on the bound
        # object; with no name it is the identity function.
        if name is None:
            get_name = _identity
        else:
            get_name = attrgetter(name)  # type: ignore[assignment]

        if unbound_message is None:
            unbound_message = "object is not bound"

        if isinstance(local, Local):
            # A Local is a namespace, not a single object, so an
            # attribute name is mandatory.
            if name is None:
                raise TypeError("'name' is required when proxying a 'Local' object.")

            def _get_current_object() -> T:
                try:
                    return get_name(local)  # type: ignore[return-value]
                except AttributeError:
                    raise RuntimeError(unbound_message) from None

        elif isinstance(local, LocalStack):

            def _get_current_object() -> T:
                # An empty stack means the proxy is unbound.
                obj = local.top

                if obj is None:
                    raise RuntimeError(unbound_message)

                return get_name(obj)

        elif isinstance(local, ContextVar):

            def _get_current_object() -> T:
                try:
                    obj = local.get()
                except LookupError:
                    raise RuntimeError(unbound_message) from None

                return get_name(obj)

        elif callable(local):

            def _get_current_object() -> T:
                return get_name(local())

        else:
            raise TypeError(f"Don't know how to proxy '{type(local)}'.")

        # __setattr__ is proxied below, so use object.__setattr__; the
        # slot name is written pre-mangled because of __slots__.
        object.__setattr__(self, "_LocalProxy__wrapped", local)
        object.__setattr__(self, "_get_current_object", _get_current_object)

    # Every name below is a _ProxyLookup descriptor that forwards the
    # operation to the bound object, with optional fallbacks used when
    # the proxy is unbound.
    __doc__ = _ProxyLookup(  # type: ignore[assignment]
        class_value=__doc__, fallback=lambda self: type(self).__doc__, is_attr=True
    )
    __wrapped__ = _ProxyLookup(
        fallback=lambda self: self._LocalProxy__wrapped,  # type: ignore[attr-defined]
        is_attr=True,
    )
    # __del__ should only delete the proxy
    __repr__ = _ProxyLookup(  # type: ignore[assignment]
        repr, fallback=lambda self: f"<{type(self).__name__} unbound>"
    )
    __str__ = _ProxyLookup(str)  # type: ignore[assignment]
    __bytes__ = _ProxyLookup(bytes)
    __format__ = _ProxyLookup()  # type: ignore[assignment]
    __lt__ = _ProxyLookup(operator.lt)
    __le__ = _ProxyLookup(operator.le)
    __eq__ = _ProxyLookup(operator.eq)  # type: ignore[assignment]
    __ne__ = _ProxyLookup(operator.ne)  # type: ignore[assignment]
    __gt__ = _ProxyLookup(operator.gt)
    __ge__ = _ProxyLookup(operator.ge)
    __hash__ = _ProxyLookup(hash)  # type: ignore[assignment]
    __bool__ = _ProxyLookup(bool, fallback=lambda self: False)
    __getattr__ = _ProxyLookup(getattr)
    # __getattribute__ triggered through __getattr__
    __setattr__ = _ProxyLookup(setattr)  # type: ignore[assignment]
    __delattr__ = _ProxyLookup(delattr)  # type: ignore[assignment]
    __dir__ = _ProxyLookup(dir, fallback=lambda self: [])  # type: ignore[assignment]
    # __get__ (proxying descriptor not supported)
    # __set__ (descriptor)
    # __delete__ (descriptor)
    # __set_name__ (descriptor)
    # __objclass__ (descriptor)
    # __slots__ used by proxy itself
    # __dict__ (__getattr__)
    # __weakref__ (__getattr__)
    # __init_subclass__ (proxying metaclass not supported)
    # __prepare__ (metaclass)
    __class__ = _ProxyLookup(fallback=lambda self: type(self), is_attr=True)  # type: ignore[assignment]
    __instancecheck__ = _ProxyLookup(lambda self, other: isinstance(other, self))
    __subclasscheck__ = _ProxyLookup(lambda self, other: issubclass(other, self))
    # __class_getitem__ triggered through __getitem__
    __call__ = _ProxyLookup(lambda self, *args, **kwargs: self(*args, **kwargs))
    __len__ = _ProxyLookup(len)
    __length_hint__ = _ProxyLookup(operator.length_hint)
    __getitem__ = _ProxyLookup(operator.getitem)
    __setitem__ = _ProxyLookup(operator.setitem)
    __delitem__ = _ProxyLookup(operator.delitem)
    # __missing__ triggered through __getitem__
    __iter__ = _ProxyLookup(iter)
    __next__ = _ProxyLookup(next)
    __reversed__ = _ProxyLookup(reversed)
    __contains__ = _ProxyLookup(operator.contains)
    # Binary arithmetic operators.
    __add__ = _ProxyLookup(operator.add)
    __sub__ = _ProxyLookup(operator.sub)
    __mul__ = _ProxyLookup(operator.mul)
    __matmul__ = _ProxyLookup(operator.matmul)
    __truediv__ = _ProxyLookup(operator.truediv)
    __floordiv__ = _ProxyLookup(operator.floordiv)
    __mod__ = _ProxyLookup(operator.mod)
    __divmod__ = _ProxyLookup(divmod)
    __pow__ = _ProxyLookup(pow)
    __lshift__ = _ProxyLookup(operator.lshift)
    __rshift__ = _ProxyLookup(operator.rshift)
    __and__ = _ProxyLookup(operator.and_)
    __xor__ = _ProxyLookup(operator.xor)
    __or__ = _ProxyLookup(operator.or_)
    # Reflected ("r") variants, built by swapping the argument order.
    __radd__ = _ProxyLookup(_l_to_r_op(operator.add))
    __rsub__ = _ProxyLookup(_l_to_r_op(operator.sub))
    __rmul__ = _ProxyLookup(_l_to_r_op(operator.mul))
    __rmatmul__ = _ProxyLookup(_l_to_r_op(operator.matmul))
    __rtruediv__ = _ProxyLookup(_l_to_r_op(operator.truediv))
    __rfloordiv__ = _ProxyLookup(_l_to_r_op(operator.floordiv))
    __rmod__ = _ProxyLookup(_l_to_r_op(operator.mod))
    __rdivmod__ = _ProxyLookup(_l_to_r_op(divmod))
    __rpow__ = _ProxyLookup(_l_to_r_op(pow))
    __rlshift__ = _ProxyLookup(_l_to_r_op(operator.lshift))
    __rrshift__ = _ProxyLookup(_l_to_r_op(operator.rshift))
    __rand__ = _ProxyLookup(_l_to_r_op(operator.and_))
    __rxor__ = _ProxyLookup(_l_to_r_op(operator.xor))
    __ror__ = _ProxyLookup(_l_to_r_op(operator.or_))
    # Augmented assignment; _ProxyIOp makes these return the proxy.
    __iadd__ = _ProxyIOp(operator.iadd)
    __isub__ = _ProxyIOp(operator.isub)
    __imul__ = _ProxyIOp(operator.imul)
    __imatmul__ = _ProxyIOp(operator.imatmul)
    __itruediv__ = _ProxyIOp(operator.itruediv)
    __ifloordiv__ = _ProxyIOp(operator.ifloordiv)
    __imod__ = _ProxyIOp(operator.imod)
    __ipow__ = _ProxyIOp(operator.ipow)
    __ilshift__ = _ProxyIOp(operator.ilshift)
    __irshift__ = _ProxyIOp(operator.irshift)
    __iand__ = _ProxyIOp(operator.iand)
    __ixor__ = _ProxyIOp(operator.ixor)
    __ior__ = _ProxyIOp(operator.ior)
    # Unary and numeric conversions.
    __neg__ = _ProxyLookup(operator.neg)
    __pos__ = _ProxyLookup(operator.pos)
    __abs__ = _ProxyLookup(abs)
    __invert__ = _ProxyLookup(operator.invert)
    __complex__ = _ProxyLookup(complex)
    __int__ = _ProxyLookup(int)
    __float__ = _ProxyLookup(float)
    __index__ = _ProxyLookup(operator.index)
    __round__ = _ProxyLookup(round)
    __trunc__ = _ProxyLookup(math.trunc)
    __floor__ = _ProxyLookup(math.floor)
    __ceil__ = _ProxyLookup(math.ceil)
    # Context manager, awaitable, and async iteration protocols fall
    # through to getattr on the bound object (no built-in equivalent).
    __enter__ = _ProxyLookup()
    __exit__ = _ProxyLookup()
    __await__ = _ProxyLookup()
    __aiter__ = _ProxyLookup()
    __anext__ = _ProxyLookup()
    __aenter__ = _ProxyLookup()
    __aexit__ = _ProxyLookup()
    __copy__ = _ProxyLookup(copy.copy)
    __deepcopy__ = _ProxyLookup(copy.deepcopy)
    # __getnewargs_ex__ (pickle through proxy not supported)
    # __getnewargs__ (pickle)
    # __getstate__ (pickle)
    # __setstate__ (pickle)
    # __reduce__ (pickle)
    # __reduce_ex__ (pickle)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/middleware/__init__.py b/venv/lib/python3.8/site-packages/werkzeug/middleware/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/middleware/__init__.py
diff --git a/venv/lib/python3.8/site-packages/werkzeug/middleware/dispatcher.py b/venv/lib/python3.8/site-packages/werkzeug/middleware/dispatcher.py
new file mode 100644
index 0000000..e11bacc
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/middleware/dispatcher.py
@@ -0,0 +1,81 @@
+"""
+Application Dispatcher
+======================
+
+This middleware creates a single WSGI application that dispatches to
+multiple other WSGI applications mounted at different URL paths.
+
+A common example is writing a Single Page Application, where you have a
+backend API and a frontend written in JavaScript that does the routing
+in the browser rather than requesting different pages from the server.
+The frontend is a single HTML and JS file that should be served for any
+path besides "/api".
+
+This example dispatches to an API app under "/api", an admin app
+under "/admin", and an app that serves frontend files for all other
+requests::
+
+ app = DispatcherMiddleware(serve_frontend, {
+ '/api': api_app,
+ '/admin': admin_app,
+ })
+
+In production, you might instead handle this at the HTTP server level,
+serving files or proxying to application servers based on location. The
+API and admin apps would each be deployed with a separate WSGI server,
+and the static files would be served directly by the HTTP server.
+
+.. autoclass:: DispatcherMiddleware
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+
+from __future__ import annotations
+
+import typing as t
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+
class DispatcherMiddleware:
    """Combine multiple applications as a single WSGI application.
    Requests are dispatched to an application based on the path it is
    mounted under.

    :param app: The WSGI application to dispatch to if the request
        doesn't match a mounted path.
    :param mounts: Maps path prefixes to applications for dispatching.
    """

    def __init__(
        self,
        app: WSGIApplication,
        mounts: dict[str, WSGIApplication] | None = None,
    ) -> None:
        self.app = app
        self.mounts = mounts or {}

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        script = environ.get("PATH_INFO", "")
        trailing = ""

        # Strip one segment at a time from the right end of the path
        # until the remaining prefix names a mounted application.
        while "/" in script:
            if script in self.mounts:
                app = self.mounts[script]
                break

            script, segment = script.rsplit("/", 1)
            trailing = f"/{segment}{trailing}"
        else:
            # No prefix matched; fall back to the app mounted at the
            # bare remainder, or the default application.
            app = self.mounts.get(script, self.app)

        # The matched prefix moves from PATH_INFO to SCRIPT_NAME so the
        # dispatched app only sees the path below its mount point.
        environ["SCRIPT_NAME"] = environ.get("SCRIPT_NAME", "") + script
        environ["PATH_INFO"] = trailing
        return app(environ, start_response)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/middleware/http_proxy.py b/venv/lib/python3.8/site-packages/werkzeug/middleware/http_proxy.py
new file mode 100644
index 0000000..5e23915
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/middleware/http_proxy.py
@@ -0,0 +1,236 @@
+"""
+Basic HTTP Proxy
+================
+
+.. autoclass:: ProxyMiddleware
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+
+from __future__ import annotations
+
+import typing as t
+from http import client
+from urllib.parse import quote
+from urllib.parse import urlsplit
+
+from ..datastructures import EnvironHeaders
+from ..http import is_hop_by_hop_header
+from ..wsgi import get_input_stream
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+
class ProxyMiddleware:
    """Proxy requests under a path to an external server, routing other
    requests to the app.

    This middleware can only proxy HTTP requests, as HTTP is the only
    protocol handled by the WSGI server. Other protocols, such as
    WebSocket requests, cannot be proxied at this layer. This should
    only be used for development, in production a real proxy server
    should be used.

    The middleware takes a dict mapping a path prefix to a dict
    describing the host to be proxied to::

        app = ProxyMiddleware(app, {
            "/static/": {
                "target": "http://127.0.0.1:5001/",
            }
        })

    Each host has the following options:

    ``target``:
        The target URL to dispatch to. This is required.
    ``remove_prefix``:
        Whether to remove the prefix from the URL before dispatching it
        to the target. The default is ``False``.
    ``host``:
        ``"<auto>"`` (default):
            The host header is automatically rewritten to the URL of the
            target.
        ``None``:
            The host header is unmodified from the client request.
        Any other value:
            The host header is overwritten with the value.
    ``headers``:
        A dictionary of headers to be sent with the request to the
        target. The default is ``{}``.
    ``ssl_context``:
        A :class:`ssl.SSLContext` defining how to verify requests if the
        target is HTTPS. The default is ``None``.

    In the example above, everything under ``"/static/"`` is proxied to
    the server on port 5001. The host header is rewritten to the target,
    and the ``"/static/"`` prefix is removed from the URLs.

    :param app: The WSGI application to wrap.
    :param targets: Proxy target configurations. See description above.
    :param chunk_size: Size of chunks to read from input stream and
        write to target.
    :param timeout: Seconds before an operation to a target fails.

    .. versionadded:: 0.14
    """

    def __init__(
        self,
        app: WSGIApplication,
        targets: t.Mapping[str, dict[str, t.Any]],
        chunk_size: int = 2 << 13,
        timeout: int = 10,
    ) -> None:
        def _set_defaults(opts: dict[str, t.Any]) -> dict[str, t.Any]:
            # Fill in the optional per-target settings documented above.
            opts.setdefault("remove_prefix", False)
            opts.setdefault("host", "<auto>")
            opts.setdefault("headers", {})
            opts.setdefault("ssl_context", None)
            return opts

        self.app = app
        # Normalize mount keys to "/name/" form so prefix matching in
        # __call__ is uniform regardless of how the caller wrote them.
        self.targets = {
            f"/{k.strip('/')}/": _set_defaults(v) for k, v in targets.items()
        }
        self.chunk_size = chunk_size
        self.timeout = timeout

    def proxy_to(
        self, opts: dict[str, t.Any], path: str, prefix: str
    ) -> WSGIApplication:
        """Build a WSGI app that forwards a single request to the
        target configured for ``prefix``.

        :param opts: Target options, after defaults were applied.
        :param path: The request path that matched the prefix.
        :param prefix: The normalized mount prefix that matched.
        """
        target = urlsplit(opts["target"])
        # socket can handle unicode host, but header must be ascii
        host = target.hostname.encode("idna").decode("ascii")

        def application(
            environ: WSGIEnvironment, start_response: StartResponse
        ) -> t.Iterable[bytes]:
            # Copy the request headers, dropping hop-by-hop headers and
            # the ones this middleware recomputes itself below.
            headers = list(EnvironHeaders(environ).items())
            headers[:] = [
                (k, v)
                for k, v in headers
                if not is_hop_by_hop_header(k)
                and k.lower() not in ("content-length", "host")
            ]
            headers.append(("Connection", "close"))

            if opts["host"] == "<auto>":
                headers.append(("Host", host))
            elif opts["host"] is None:
                headers.append(("Host", environ["HTTP_HOST"]))
            else:
                headers.append(("Host", opts["host"]))

            headers.extend(opts["headers"].items())
            remote_path = path

            if opts["remove_prefix"]:
                remote_path = remote_path[len(prefix) :].lstrip("/")
                remote_path = f"{target.path.rstrip('/')}/{remote_path}"

            content_length = environ.get("CONTENT_LENGTH")
            chunked = False

            if content_length not in ("", None):
                headers.append(("Content-Length", content_length))  # type: ignore
            elif content_length is not None:
                # CONTENT_LENGTH == "": body length unknown, stream it
                # with chunked transfer encoding.
                headers.append(("Transfer-Encoding", "chunked"))
                chunked = True

            try:
                if target.scheme == "http":
                    con = client.HTTPConnection(
                        host, target.port or 80, timeout=self.timeout
                    )
                elif target.scheme == "https":
                    con = client.HTTPSConnection(
                        host,
                        target.port or 443,
                        timeout=self.timeout,
                        context=opts["ssl_context"],
                    )
                else:
                    raise RuntimeError(
                        "Target scheme must be 'http' or 'https', got"
                        f" {target.scheme!r}."
                    )

                con.connect()
                # safe = https://url.spec.whatwg.org/#url-path-segment-string
                # as well as percent for things that are already quoted
                remote_url = quote(remote_path, safe="!$&'()*+,/:;=@%")
                querystring = environ["QUERY_STRING"]

                if querystring:
                    remote_url = f"{remote_url}?{querystring}"

                con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True)

                for k, v in headers:
                    if k.lower() == "connection":
                        v = "close"

                    con.putheader(k, v)

                con.endheaders()
                stream = get_input_stream(environ)

                # Forward the request body to the target in chunks.
                while True:
                    data = stream.read(self.chunk_size)

                    if not data:
                        break

                    if chunked:
                        con.send(b"%x\r\n%s\r\n" % (len(data), data))
                    else:
                        con.send(data)

                resp = con.getresponse()
            except OSError:
                # Any socket-level failure while talking to the target
                # is reported to the client as a 502 Bad Gateway.
                from ..exceptions import BadGateway

                return BadGateway()(environ, start_response)

            start_response(
                f"{resp.status} {resp.reason}",
                [
                    (k.title(), v)
                    for k, v in resp.getheaders()
                    if not is_hop_by_hop_header(k)
                ],
            )

            def read() -> t.Iterator[bytes]:
                # Stream the target's response body back to the client.
                while True:
                    try:
                        data = resp.read(self.chunk_size)
                    except OSError:
                        break

                    if not data:
                        break

                    yield data

            return read()

        return application

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        # Route to the first configured prefix that matches the path;
        # otherwise fall through to the wrapped application.
        path = environ["PATH_INFO"]
        app = self.app

        for prefix, opts in self.targets.items():
            if path.startswith(prefix):
                app = self.proxy_to(opts, path, prefix)
                break

        return app(environ, start_response)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/middleware/lint.py b/venv/lib/python3.8/site-packages/werkzeug/middleware/lint.py
new file mode 100644
index 0000000..de93b52
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/middleware/lint.py
@@ -0,0 +1,439 @@
+"""
+WSGI Protocol Linter
+====================
+
+This module provides a middleware that performs sanity checks on the
+behavior of the WSGI server and application. It checks that the
+:pep:`3333` WSGI spec is properly implemented. It also warns on some
+common HTTP errors such as non-empty responses for 304 status codes.
+
+.. autoclass:: LintMiddleware
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+
+from __future__ import annotations
+
+import typing as t
+from types import TracebackType
+from urllib.parse import urlparse
+from warnings import warn
+
+from ..datastructures import Headers
+from ..http import is_entity_header
+from ..wsgi import FileWrapper
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+
class WSGIWarning(Warning):
    """Emitted when the server or application violates the WSGI spec."""
+
+
class HTTPWarning(Warning):
    """Emitted for questionable HTTP behavior (e.g. bodies on 304s)."""
+
+
def check_type(context: str, obj: object, need: type = str) -> None:
    """Warn with a ``WSGIWarning`` if *obj* is not exactly of type *need*.

    An exact ``type`` comparison is deliberate: WSGI requires native
    ``str``/``bytes`` objects, not lookalike subclasses.
    """
    actual = type(obj)

    if actual is need:
        return

    warn(
        f"{context!r} requires {need.__name__!r}, got {actual.__name__!r}.",
        WSGIWarning,
        stacklevel=3,
    )
+
+
class InputStream:
    """Wrapper around ``wsgi.input`` that warns about unsafe usage while
    delegating all actual reads to the wrapped stream.
    """

    def __init__(self, stream: t.IO[bytes]) -> None:
        self._stream = stream

    def read(self, *args: t.Any) -> bytes:
        argc = len(args)

        if argc == 0:
            # A conforming server need not provide EOF on wsgi.input.
            warn(
                "WSGI does not guarantee an EOF marker on the input stream, thus making"
                " calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
                " return from this call.",
                WSGIWarning,
                stacklevel=2,
            )
        elif argc != 1:
            warn(
                "Too many parameters passed to 'wsgi.input.read()'.",
                WSGIWarning,
                stacklevel=2,
            )

        return self._stream.read(*args)

    def readline(self, *args: t.Any) -> bytes:
        argc = len(args)

        if argc > 1:
            raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")

        if argc == 0:
            warn(
                "Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
                " 'wsgi.input.read()' instead.",
                WSGIWarning,
                stacklevel=2,
            )
        else:
            # A size hint works on major servers but is not part of WSGI.
            warn(
                "'wsgi.input.readline()' was called with a size hint. WSGI does not"
                " support this, although it's available on all major servers.",
                WSGIWarning,
                stacklevel=2,
            )

        return self._stream.readline(*args)

    def __iter__(self) -> t.Iterator[bytes]:
        try:
            return iter(self._stream)
        except TypeError:
            warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
            return iter(())

    def close(self) -> None:
        # The application must never close the server-provided input stream.
        warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
        self._stream.close()
+
+
class ErrorStream:
    """Wrapper for ``wsgi.errors`` that validates everything written to it."""

    def __init__(self, stream: t.IO[str]) -> None:
        self._wrapped = stream

    def write(self, s: str) -> None:
        # Only native strings may go to the error stream.
        check_type("wsgi.error.write()", s, str)
        self._wrapped.write(s)

    def flush(self) -> None:
        self._wrapped.flush()

    def writelines(self, seq: t.Iterable[str]) -> None:
        # Route through write() so every chunk is type-checked.
        for chunk in seq:
            self.write(chunk)

    def close(self) -> None:
        # The application must never close the server-provided error stream.
        warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
        self._wrapped.close()
+
+
class GuardedWrite:
    """Wraps the ``write`` callable returned by ``start_response``,
    type-checking each chunk and recording its size for later
    Content-Length verification.
    """

    def __init__(self, write: t.Callable[[bytes], object], chunks: list[int]) -> None:
        self._sink = write
        self._sizes = chunks

    def __call__(self, s: bytes) -> None:
        check_type("write()", s, bytes)
        self._sink(s)
        self._sizes.append(len(s))
+
+
class GuardedIterator:
    """Wraps the iterable returned by the application so checks can run
    while it is consumed and when it is closed.
    """

    def __init__(
        self,
        iterator: t.Iterable[bytes],
        headers_set: tuple[int, Headers],
        chunks: list[int],
    ) -> None:
        self._iterator = iterator
        # Cache the bound __next__ so each iteration step is a single call.
        self._next = iter(iterator).__next__
        self.closed = False
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self) -> GuardedIterator:
        return self

    def __next__(self) -> bytes:
        if self.closed:
            warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)

        rv = self._next()

        if not self.headers_set:
            # start_response must run before the first chunk is produced.
            warn(
                "The application returned before it started the response.",
                WSGIWarning,
                stacklevel=2,
            )

        check_type("application iterator items", rv, bytes)
        # Record chunk sizes so close() can compare against Content-Length.
        self.chunks.append(len(rv))
        return rv

    def close(self) -> None:
        """Close the wrapped iterator and run post-response HTTP checks."""
        self.closed = True

        if hasattr(self._iterator, "close"):
            self._iterator.close()

        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get("content-length", type=int)

            if status_code == 304:
                # 304 must not carry entity headers (except Expires and
                # Content-Location) and must not have a body.
                for key, _value in headers:
                    key = key.lower()
                    if key not in ("expires", "content-location") and is_entity_header(
                        key
                    ):
                        warn(
                            f"Entity header {key!r} found in 304 response.",
                            HTTPWarning,
                            stacklevel=2,
                        )
                if bytes_sent:
                    warn(
                        "304 responses must not have a body.",
                        HTTPWarning,
                        stacklevel=2,
                    )
            elif 100 <= status_code < 200 or status_code == 204:
                # 1xx and 204 responses must be empty as well.
                if content_length != 0:
                    warn(
                        f"{status_code} responses must have an empty content length.",
                        HTTPWarning,
                        stacklevel=2,
                    )
                if bytes_sent:
                    warn(
                        f"{status_code} responses must not have a body.",
                        HTTPWarning,
                        stacklevel=2,
                    )
            elif content_length is not None and content_length != bytes_sent:
                warn(
                    "Content-Length and the number of bytes sent to the"
                    " client do not match.",
                    WSGIWarning,
                    stacklevel=2,
                )

    def __del__(self) -> None:
        if not self.closed:
            try:
                warn(
                    "Iterator was garbage collected before it was closed.",
                    WSGIWarning,
                    stacklevel=2,
                )
            except Exception:
                # The warning machinery may already be torn down during
                # interpreter shutdown; never raise from __del__.
                pass
+
+
class LintMiddleware:
    """Warns about common errors in the WSGI and HTTP behavior of the
    server and wrapped application. Some of the issues it checks are:

    - invalid status codes
    - non-bytes sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators

    Error information is emitted using the :mod:`warnings` module.

    :param app: The WSGI application to wrap.

    .. code-block:: python

        from werkzeug.middleware.lint import LintMiddleware
        app = LintMiddleware(app)
    """

    def __init__(self, app: WSGIApplication) -> None:
        self.app = app

    def check_environ(self, environ: WSGIEnvironment) -> None:
        """Warn about missing or malformed entries in the WSGI environ."""
        if type(environ) is not dict:  # noqa: E721
            warn(
                "WSGI environment is not a standard Python dict.",
                WSGIWarning,
                stacklevel=4,
            )
        # Keys required by PEP 3333.
        for key in (
            "REQUEST_METHOD",
            "SERVER_NAME",
            "SERVER_PORT",
            "wsgi.version",
            "wsgi.input",
            "wsgi.errors",
            "wsgi.multithread",
            "wsgi.multiprocess",
            "wsgi.run_once",
        ):
            if key not in environ:
                warn(
                    f"Required environment key {key!r} not found",
                    WSGIWarning,
                    stacklevel=3,
                )
        if environ["wsgi.version"] != (1, 0):
            warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3)

        script_name = environ.get("SCRIPT_NAME", "")
        path_info = environ.get("PATH_INFO", "")

        if script_name and script_name[0] != "/":
            warn(
                f"'SCRIPT_NAME' does not start with a slash: {script_name!r}",
                WSGIWarning,
                stacklevel=3,
            )

        if path_info and path_info[0] != "/":
            warn(
                f"'PATH_INFO' does not start with a slash: {path_info!r}",
                WSGIWarning,
                stacklevel=3,
            )

    def check_start_response(
        self,
        status: str,
        headers: list[tuple[str, str]],
        exc_info: None | (tuple[type[BaseException], BaseException, TracebackType]),
    ) -> tuple[int, Headers]:
        """Validate the arguments of a ``start_response`` call.

        :return: The parsed status code and a :class:`Headers` object for
            later response checks.
        """
        check_type("status", status, str)
        # Status line format: "NNN Reason phrase".
        status_code_str = status.split(None, 1)[0]

        if len(status_code_str) != 3 or not status_code_str.isdecimal():
            warn("Status code must be three digits.", WSGIWarning, stacklevel=3)

        if len(status) < 4 or status[3] != " ":
            warn(
                f"Invalid value for status {status!r}. Valid status strings are three"
                " digits, a space and a status explanation.",
                WSGIWarning,
                stacklevel=3,
            )

        status_code = int(status_code_str)

        if status_code < 100:
            warn("Status code < 100 detected.", WSGIWarning, stacklevel=3)

        if type(headers) is not list:  # noqa: E721
            warn("Header list is not a list.", WSGIWarning, stacklevel=3)

        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn("Header items must be 2-item tuples.", WSGIWarning, stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:  # noqa: E721
                warn(
                    "Header keys and values must be strings.", WSGIWarning, stacklevel=3
                )
            if name.lower() == "status":
                # CGI uses a Status header; WSGI carries it in the status line.
                warn(
                    "The status header is not supported due to"
                    " conflicts with the CGI spec.",
                    WSGIWarning,
                    stacklevel=3,
                )

        if exc_info is not None and not isinstance(exc_info, tuple):
            warn("Invalid value for exc_info.", WSGIWarning, stacklevel=3)

        headers_obj = Headers(headers)
        self.check_headers(headers_obj)

        return status_code, headers_obj

    def check_headers(self, headers: Headers) -> None:
        """Warn about questionable HTTP response headers (ETag, Location)."""
        etag = headers.get("etag")

        if etag is not None:
            if etag.startswith(("W/", "w/")):
                if etag.startswith("w/"):
                    warn(
                        "Weak etag indicator should be upper case.",
                        HTTPWarning,
                        stacklevel=4,
                    )

                etag = etag[2:]

            # The opaque-tag itself must be wrapped in double quotes.
            if not (etag[:1] == etag[-1:] == '"'):
                warn("Unquoted etag emitted.", HTTPWarning, stacklevel=4)

        location = headers.get("location")

        if location is not None:
            if not urlparse(location).netloc:
                warn(
                    "Absolute URLs required for location header.",
                    HTTPWarning,
                    stacklevel=4,
                )

    def check_iterator(self, app_iter: t.Iterable[bytes]) -> None:
        """Warn if the application returned a bare string as its body."""
        if isinstance(app_iter, str):
            warn(
                "The application returned a string. The response will send one"
                " character at a time to the client, which will kill performance."
                " Return a list or iterable instead.",
                WSGIWarning,
                stacklevel=3,
            )

    def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Iterable[bytes]:
        """Run the wrapped application with all checks hooked in."""
        if len(args) != 2:
            warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2)

        if kwargs:
            warn(
                "A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2
            )

        environ: WSGIEnvironment = args[0]
        start_response: StartResponse = args[1]

        self.check_environ(environ)
        # Wrap the streams so unsafe usage by the application is reported.
        environ["wsgi.input"] = InputStream(environ["wsgi.input"])
        environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"])

        # Hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length.
        environ["wsgi.file_wrapper"] = FileWrapper

        headers_set: list[t.Any] = []
        chunks: list[int] = []

        def checking_start_response(
            *args: t.Any, **kwargs: t.Any
        ) -> t.Callable[[bytes], None]:
            # Validate the call signature before delegating to the real
            # start_response.
            if len(args) not in {2, 3}:
                warn(
                    f"Invalid number of arguments: {len(args)}, expected 2 or 3.",
                    WSGIWarning,
                    stacklevel=2,
                )

            if kwargs:
                warn(
                    "'start_response' does not take keyword arguments.",
                    WSGIWarning,
                    stacklevel=2,
                )

            status: str = args[0]
            headers: list[tuple[str, str]] = args[1]
            exc_info: (
                None | (tuple[type[BaseException], BaseException, TracebackType])
            ) = args[2] if len(args) == 3 else None

            headers_set[:] = self.check_start_response(status, headers, exc_info)
            return GuardedWrite(start_response(status, headers, exc_info), chunks)

        app_iter = self.app(environ, t.cast("StartResponse", checking_start_response))
        self.check_iterator(app_iter)
        return GuardedIterator(
            app_iter, t.cast(t.Tuple[int, Headers], headers_set), chunks
        )
diff --git a/venv/lib/python3.8/site-packages/werkzeug/middleware/profiler.py b/venv/lib/python3.8/site-packages/werkzeug/middleware/profiler.py
new file mode 100644
index 0000000..112b877
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/middleware/profiler.py
@@ -0,0 +1,155 @@
+"""
+Application Profiler
+====================
+
+This module provides a middleware that profiles each request with the
+:mod:`cProfile` module. This can help identify bottlenecks in your code
+that may be slowing down your application.
+
+.. autoclass:: ProfilerMiddleware
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+
+from __future__ import annotations
+
+import os.path
+import sys
+import time
+import typing as t
+from pstats import Stats
+
+try:
+ from cProfile import Profile
+except ImportError:
+ from profile import Profile # type: ignore
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+
class ProfilerMiddleware:
    """Wrap a WSGI application and profile the execution of each
    request. Responses are buffered so that timings are more exact.

    If ``stream`` is given, :class:`pstats.Stats` are written to it
    after each request. If ``profile_dir`` is given, :mod:`cProfile`
    data files are saved to that directory, one file per request.

    The filename can be customized by passing ``filename_format``. If
    it is a string, it will be formatted using :meth:`str.format` with
    the following fields available:

    - ``{method}`` - The request method; GET, POST, etc.
    - ``{path}`` - The request path or 'root' should one not exist.
    - ``{elapsed}`` - The elapsed time of the request in milliseconds.
    - ``{time}`` - The time of the request.

    If it is a callable, it will be called with the WSGI ``environ`` and
    be expected to return a filename string. The ``environ`` dictionary
    will also have the ``"werkzeug.profiler"`` key populated with a
    dictionary containing the following fields (more may be added in the
    future):

    - ``{elapsed}`` - The elapsed time of the request in milliseconds.
    - ``{time}`` - The time of the request.

    :param app: The WSGI application to wrap.
    :param stream: Write stats to this stream. Disable with ``None``.
    :param sort_by: A tuple of columns to sort stats by. See
        :meth:`pstats.Stats.sort_stats`.
    :param restrictions: A tuple of restrictions to filter stats by. See
        :meth:`pstats.Stats.print_stats`.
    :param profile_dir: Save profile data files to this directory.
    :param filename_format: Format string for profile data file names,
        or a callable returning a name. See explanation above.

    .. code-block:: python

        from werkzeug.middleware.profiler import ProfilerMiddleware
        app = ProfilerMiddleware(app)

    .. versionchanged:: 3.0
        Added the ``"werkzeug.profiler"`` key to the ``filename_format(environ)``
        parameter with the ``elapsed`` and ``time`` fields.

    .. versionchanged:: 0.15
        Stats are written even if ``profile_dir`` is given, and can be
        disable by passing ``stream=None``.

    .. versionadded:: 0.15
        Added ``filename_format``.

    .. versionadded:: 0.9
        Added ``restrictions`` and ``profile_dir``.
    """

    def __init__(
        self,
        app: WSGIApplication,
        stream: t.IO[str] | None = sys.stdout,
        sort_by: t.Iterable[str] = ("time", "calls"),
        restrictions: t.Iterable[str | int | float] = (),
        profile_dir: str | None = None,
        filename_format: str = "{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof",
    ) -> None:
        self._app = app
        self._stream = stream
        self._sort_by = sort_by
        self._restrictions = restrictions
        self._profile_dir = profile_dir
        self._filename_format = filename_format

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Profile one request and return its fully buffered response body."""
        response_body: list[bytes] = []

        def catching_start_response(status, headers, exc_info=None):  # type: ignore
            start_response(status, headers, exc_info)
            # Direct write() calls also land in the buffer.
            return response_body.append

        def runapp() -> None:
            # Consume (and close) the whole app iterator under the profiler
            # so the timing covers the entire response generation.
            app_iter = self._app(
                environ, t.cast("StartResponse", catching_start_response)
            )
            response_body.extend(app_iter)

            if hasattr(app_iter, "close"):
                app_iter.close()

        profile = Profile()
        start = time.time()
        profile.runcall(runapp)
        body = b"".join(response_body)
        elapsed = time.time() - start

        if self._profile_dir is not None:
            if callable(self._filename_format):
                # Expose timing info to the filename callback.
                environ["werkzeug.profiler"] = {
                    "elapsed": elapsed * 1000.0,
                    "time": time.time(),
                }
                filename = self._filename_format(environ)
            else:
                filename = self._filename_format.format(
                    method=environ["REQUEST_METHOD"],
                    path=environ["PATH_INFO"].strip("/").replace("/", ".") or "root",
                    elapsed=elapsed * 1000.0,
                    time=time.time(),
                )
            filename = os.path.join(self._profile_dir, filename)
            profile.dump_stats(filename)

        if self._stream is not None:
            stats = Stats(profile, stream=self._stream)
            stats.sort_stats(*self._sort_by)
            print("-" * 80, file=self._stream)
            path_info = environ.get("PATH_INFO", "")
            print(f"PATH: {path_info!r}", file=self._stream)
            stats.print_stats(*self._restrictions)
            print(f"{'-' * 80}\n", file=self._stream)

        return [body]
diff --git a/venv/lib/python3.8/site-packages/werkzeug/middleware/proxy_fix.py b/venv/lib/python3.8/site-packages/werkzeug/middleware/proxy_fix.py
new file mode 100644
index 0000000..cbf4e0b
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/middleware/proxy_fix.py
@@ -0,0 +1,183 @@
+"""
+X-Forwarded-For Proxy Fix
+=========================
+
+This module provides a middleware that adjusts the WSGI environ based on
+``X-Forwarded-`` headers that proxies in front of an application may
+set.
+
+When an application is running behind a proxy server, WSGI may see the
+request as coming from that server rather than the real client. Proxies
+set various headers to track where the request actually came from.
+
+This middleware should only be used if the application is actually
+behind such a proxy, and should be configured with the number of proxies
+that are chained in front of it. Not all proxies set all the headers.
+Since incoming headers can be faked, you must set how many proxies are
+setting each header so the middleware knows what to trust.
+
+.. autoclass:: ProxyFix
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+
+from __future__ import annotations
+
+import typing as t
+
+from ..http import parse_list_header
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+
class ProxyFix:
    """Adjust the WSGI environ based on ``X-Forwarded-`` headers that
    trusted proxies in front of the application may set.

    - ``X-Forwarded-For`` sets ``REMOTE_ADDR``.
    - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.
    - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and
      ``SERVER_PORT``.
    - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.
    - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.

    You must tell the middleware how many proxies set each header so it
    knows what values to trust. It is a security issue to trust values
    that came from the client rather than a proxy.

    The original values of the headers are stored in the WSGI
    environ as ``werkzeug.proxy_fix.orig``, a dict.

    :param app: The WSGI application to wrap.
    :param x_for: Number of values to trust for ``X-Forwarded-For``.
    :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.
    :param x_host: Number of values to trust for ``X-Forwarded-Host``.
    :param x_port: Number of values to trust for ``X-Forwarded-Port``.
    :param x_prefix: Number of values to trust for
        ``X-Forwarded-Prefix``.

    .. code-block:: python

        from werkzeug.middleware.proxy_fix import ProxyFix
        # App is behind one proxy that sets the -For and -Host headers.
        app = ProxyFix(app, x_for=1, x_host=1)

    .. versionchanged:: 1.0
        The ``num_proxies`` argument and attribute; the ``get_remote_addr`` method; and
        the environ keys ``orig_remote_addr``, ``orig_wsgi_url_scheme``, and
        ``orig_http_host`` were removed.

    .. versionchanged:: 0.15
        All headers support multiple values. Each header is configured with a separate
        number of trusted proxies.

    .. versionchanged:: 0.15
        Original WSGI environ values are stored in the ``werkzeug.proxy_fix.orig`` dict.

    .. versionchanged:: 0.15
        Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.

    .. versionchanged:: 0.15
        ``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify
        ``SERVER_NAME`` and ``SERVER_PORT``.
    """

    def __init__(
        self,
        app: WSGIApplication,
        x_for: int = 1,
        x_proto: int = 1,
        x_host: int = 0,
        x_port: int = 0,
        x_prefix: int = 0,
    ) -> None:
        self.app = app
        self.x_for = x_for
        self.x_proto = x_proto
        self.x_host = x_host
        self.x_port = x_port
        self.x_prefix = x_prefix

    def _get_real_value(self, trusted: int, value: str | None) -> str | None:
        """Pick the trusted value out of a comma separated list header.

        :param trusted: Number of values to trust in the header.
        :param value: Comma separated list header value to parse.
        :return: The real value, or ``None`` if there are fewer values
            than the number of trusted proxies.

        .. versionchanged:: 1.0
            Renamed from ``_get_trusted_comma``.

        .. versionadded:: 0.15
        """
        # Nothing to trust, or the header was not sent at all.
        if not trusted or not value:
            return None

        values = parse_list_header(value)

        if len(values) < trusted:
            return None

        # Counting from the right skips values appended by untrusted hops.
        return values[-trusted]

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Rewrite the environ from the ``X-Forwarded-*`` headers, saving
        the originals under ``werkzeug.proxy_fix.orig``, then call the
        wrapped application.
        """
        get = environ.get
        environ["werkzeug.proxy_fix.orig"] = {
            "REMOTE_ADDR": get("REMOTE_ADDR"),
            "wsgi.url_scheme": get("wsgi.url_scheme"),
            "HTTP_HOST": get("HTTP_HOST"),
            "SERVER_NAME": get("SERVER_NAME"),
            "SERVER_PORT": get("SERVER_PORT"),
            "SCRIPT_NAME": get("SCRIPT_NAME"),
        }

        x_for = self._get_real_value(self.x_for, get("HTTP_X_FORWARDED_FOR"))
        if x_for:
            environ["REMOTE_ADDR"] = x_for

        x_proto = self._get_real_value(self.x_proto, get("HTTP_X_FORWARDED_PROTO"))
        if x_proto:
            environ["wsgi.url_scheme"] = x_proto

        x_host = self._get_real_value(self.x_host, get("HTTP_X_FORWARDED_HOST"))
        if x_host:
            environ["HTTP_HOST"] = environ["SERVER_NAME"] = x_host
            # Split off an explicit port, but leave bare IPv6 addresses
            # (which end with "]") alone.
            if ":" in x_host and not x_host.endswith("]"):
                environ["SERVER_NAME"], environ["SERVER_PORT"] = x_host.rsplit(":", 1)

        x_port = self._get_real_value(self.x_port, get("HTTP_X_FORWARDED_PORT"))
        if x_port:
            host = environ.get("HTTP_HOST")
            if host:
                # Replace any existing port, keeping bare IPv6 hosts intact.
                if ":" in host and not host.endswith("]"):
                    host = host.rsplit(":", 1)[0]
                environ["HTTP_HOST"] = f"{host}:{x_port}"
            environ["SERVER_PORT"] = x_port

        x_prefix = self._get_real_value(self.x_prefix, get("HTTP_X_FORWARDED_PREFIX"))
        if x_prefix:
            environ["SCRIPT_NAME"] = x_prefix

        return self.app(environ, start_response)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/middleware/shared_data.py b/venv/lib/python3.8/site-packages/werkzeug/middleware/shared_data.py
new file mode 100644
index 0000000..0f467f2
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/middleware/shared_data.py
@@ -0,0 +1,283 @@
+"""
+Serve Shared Static Files
+=========================
+
+.. autoclass:: SharedDataMiddleware
+ :members: is_allowed
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+
+from __future__ import annotations
+
+import collections.abc as cabc
+import importlib.util
+import mimetypes
+import os
+import posixpath
+import typing as t
+from datetime import datetime
+from datetime import timezone
+from io import BytesIO
+from time import time
+from zlib import adler32
+
+from ..http import http_date
+from ..http import is_resource_modified
+from ..security import safe_join
+from ..utils import get_content_type
+from ..wsgi import get_path_info
+from ..wsgi import wrap_file
+
+_TOpener = t.Callable[[], t.Tuple[t.IO[bytes], datetime, int]]
+_TLoader = t.Callable[[t.Optional[str]], t.Tuple[t.Optional[str], t.Optional[_TOpener]]]
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+
class SharedDataMiddleware:
    """A WSGI middleware which provides static content for development
    environments or simple server setups. Its usage is quite simple::

        import os
        from werkzeug.middleware.shared_data import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })

    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``. This is pretty useful during development
    because a standalone media server is not required. Files can also be
    mounted on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    You can also tell the middleware to serve files from Python package
    data by passing a ``(package, path)`` tuple, loaded via
    :mod:`importlib`::

        app = SharedDataMiddleware(app, {
            '/static': ('myapplication', 'static')
        })

    This will then serve the ``static`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web. If `cache` is set to
    `False` no caching headers are sent.

    Currently the middleware does not support non-ASCII filenames. If the
    encoding on the file system happens to match the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetype`
    module. If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.

    :param app: the application to wrap. If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a list or dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    :param fallback_mimetype: The fallback mimetype for unknown files.

    .. versionchanged:: 1.0
        The default ``fallback_mimetype`` is
        ``application/octet-stream``. If a filename looks like a text
        mimetype, the ``utf-8`` charset is added to it.

    .. versionadded:: 0.6
        Added ``fallback_mimetype``.

    .. versionchanged:: 0.5
        Added ``cache_timeout``.
    """

    def __init__(
        self,
        app: WSGIApplication,
        exports: (
            cabc.Mapping[str, str | tuple[str, str]]
            | t.Iterable[tuple[str, str | tuple[str, str]]]
        ),
        disallow: None = None,
        cache: bool = True,
        cache_timeout: int = 60 * 60 * 12,
        fallback_mimetype: str = "application/octet-stream",
    ) -> None:
        self.app = app
        self.exports: list[tuple[str, _TLoader]] = []
        self.cache = cache
        self.cache_timeout = cache_timeout

        if isinstance(exports, cabc.Mapping):
            exports = exports.items()

        # Build a loader per export: package tuple, single file, or directory.
        for key, value in exports:
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, str):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError(f"unknown def {value!r}")

            self.exports.append((key, loader))

        if disallow is not None:
            from fnmatch import fnmatch

            # Replaces the is_allowed method below for this instance.
            self.is_allowed = lambda x: not fnmatch(x, disallow)

        self.fallback_mimetype = fallback_mimetype

    def is_allowed(self, filename: str) -> bool:
        """Subclasses can override this method to disallow the access to
        certain files. However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True

    def _opener(self, filename: str) -> _TOpener:
        """Return a callable producing ``(file, mtime, size)`` for *filename*."""
        return lambda: (
            open(filename, "rb"),
            datetime.fromtimestamp(os.path.getmtime(filename), tz=timezone.utc),
            int(os.path.getsize(filename)),
        )

    def get_file_loader(self, filename: str) -> _TLoader:
        """Return a loader that always serves the single file *filename*."""
        return lambda x: (os.path.basename(filename), self._opener(filename))

    def get_package_loader(self, package: str, package_path: str) -> _TLoader:
        """Return a loader serving resources from an importable package."""
        # Resources read from a zip/wheel have no mtime; use import time.
        load_time = datetime.now(timezone.utc)
        spec = importlib.util.find_spec(package)
        reader = spec.loader.get_resource_reader(package)  # type: ignore[union-attr]

        def loader(
            path: str | None,
        ) -> tuple[str | None, _TOpener | None]:
            if path is None:
                return None, None

            # safe_join rejects paths escaping the package directory.
            path = safe_join(package_path, path)

            if path is None:
                return None, None

            basename = posixpath.basename(path)

            try:
                resource = reader.open_resource(path)
            except OSError:
                return None, None

            if isinstance(resource, BytesIO):
                # In-memory resource (e.g. zipimport): no file metadata.
                return (
                    basename,
                    lambda: (resource, load_time, len(resource.getvalue())),
                )

            return (
                basename,
                lambda: (
                    resource,
                    datetime.fromtimestamp(
                        os.path.getmtime(resource.name), tz=timezone.utc
                    ),
                    os.path.getsize(resource.name),
                ),
            )

        return loader

    def get_directory_loader(self, directory: str) -> _TLoader:
        """Return a loader serving files below *directory*."""
        def loader(
            path: str | None,
        ) -> tuple[str | None, _TOpener | None]:
            if path is not None:
                # safe_join rejects paths escaping the directory.
                path = safe_join(directory, path)

                if path is None:
                    return None, None
            else:
                path = directory

            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)

            return None, None

        return loader

    def generate_etag(self, mtime: datetime, file_size: int, real_filename: str) -> str:
        """Build a weak content identifier from mtime, size, and filename."""
        fn_str = os.fsencode(real_filename)
        timestamp = mtime.timestamp()
        checksum = adler32(fn_str) & 0xFFFFFFFF
        return f"wzsdm-{timestamp}-{file_size}-{checksum}"

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Serve a matching static file, or fall through to the wrapped app."""
        path = get_path_info(environ)
        file_loader = None

        for search_path, loader in self.exports:
            # Exact match first (e.g. an export mounted on a single file).
            if search_path == path:
                real_filename, file_loader = loader(None)

                if file_loader is not None:
                    break

            if not search_path.endswith("/"):
                search_path += "/"

            # Prefix match: hand the loader the remainder of the path.
            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path) :])

                if file_loader is not None:
                    break

        if file_loader is None or not self.is_allowed(real_filename):  # type: ignore
            return self.app(environ, start_response)

        guessed_type = mimetypes.guess_type(real_filename)  # type: ignore
        mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, "utf-8")
        f, mtime, file_size = file_loader()

        headers = [("Date", http_date())]

        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)  # type: ignore
            headers += [
                ("Etag", f'"{etag}"'),
                ("Cache-Control", f"max-age={timeout}, public"),
            ]

            # Conditional request: answer 304 without a body if unchanged.
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response("304 Not Modified", headers)
                return []

            headers.append(("Expires", http_date(time() + timeout)))
        else:
            headers.append(("Cache-Control", "public"))

        headers.extend(
            (
                ("Content-Type", mime_type),
                ("Content-Length", str(file_size)),
                ("Last-Modified", http_date(mtime)),
            )
        )
        start_response("200 OK", headers)
        # wrap_file uses the server's file_wrapper for efficient sending.
        return wrap_file(environ, f)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/py.typed b/venv/lib/python3.8/site-packages/werkzeug/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/py.typed
diff --git a/venv/lib/python3.8/site-packages/werkzeug/routing/__init__.py b/venv/lib/python3.8/site-packages/werkzeug/routing/__init__.py
new file mode 100644
index 0000000..62adc48
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/routing/__init__.py
@@ -0,0 +1,134 @@
+"""When it comes to combining multiple controller or view functions
+(however you want to call them) you need a dispatcher. A simple way
+would be applying regular expression tests on the ``PATH_INFO`` and
+calling registered callback functions that return the value then.
+
+This module implements a much more powerful system than simple regular
+expression matching because it can also convert values in the URLs and
+build URLs.
+
Here is a simple example that creates a URL map for an application with
+two subdomains (www and kb) and some URL rules:
+
+.. code-block:: python
+
+ m = Map([
+ # Static URLs
+ Rule('/', endpoint='static/index'),
+ Rule('/about', endpoint='static/about'),
+ Rule('/help', endpoint='static/help'),
+ # Knowledge Base
+ Subdomain('kb', [
+ Rule('/', endpoint='kb/index'),
+ Rule('/browse/', endpoint='kb/browse'),
+ Rule('/browse/<int:id>/', endpoint='kb/browse'),
+ Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
+ ])
+ ], default_subdomain='www')
+
+If the application doesn't use subdomains it's perfectly fine to not set
+the default subdomain and not use the `Subdomain` rule factory. The
+endpoint in the rules can be anything, for example import paths or
+unique identifiers. The WSGI application can use those endpoints to get the
+handler for that URL. It doesn't have to be a string at all but it's
+recommended.
+
+Now it's possible to create a URL adapter for one of the subdomains and
+build URLs:
+
+.. code-block:: python
+
+ c = m.bind('example.com')
+
+ c.build("kb/browse", dict(id=42))
+ 'http://kb.example.com/browse/42/'
+
+ c.build("kb/browse", dict())
+ 'http://kb.example.com/browse/'
+
+ c.build("kb/browse", dict(id=42, page=3))
+ 'http://kb.example.com/browse/42/3'
+
+ c.build("static/about")
+ '/about'
+
+ c.build("static/index", force_external=True)
+ 'http://www.example.com/'
+
+ c = m.bind('example.com', subdomain='kb')
+
+ c.build("static/about")
+ 'http://www.example.com/about'
+
The first argument to bind is the server name *without* the subdomain.
By default it will assume that the script is mounted on the root, but
often that's not the case, so you can provide the real mount point as
the second argument:
+
+.. code-block:: python
+
+ c = m.bind('example.com', '/applications/example')
+
The third argument can be the subdomain; if it is not given, the default
subdomain is used. For more details about binding, have a look at the
documentation of the `MapAdapter`.
+
+And here is how you can match URLs:
+
+.. code-block:: python
+
+ c = m.bind('example.com')
+
+ c.match("/")
+ ('static/index', {})
+
+ c.match("/about")
+ ('static/about', {})
+
+ c = m.bind('example.com', '/', 'kb')
+
+ c.match("/")
+ ('kb/index', {})
+
+ c.match("/browse/42/23")
+ ('kb/browse', {'id': 42, 'page': 23})
+
+If matching fails you get a ``NotFound`` exception, if the rule thinks
+it's a good idea to redirect (for example because the URL was defined
+to have a slash at the end but the request was missing that slash) it
+will raise a ``RequestRedirect`` exception. Both are subclasses of
+``HTTPException`` so you can use those errors as responses in the
+application.
+
+If matching succeeded but the URL rule was incompatible to the given
+method (for example there were only rules for ``GET`` and ``HEAD`` but
+routing tried to match a ``POST`` request) a ``MethodNotAllowed``
+exception is raised.
+"""
+
+from .converters import AnyConverter as AnyConverter
+from .converters import BaseConverter as BaseConverter
+from .converters import FloatConverter as FloatConverter
+from .converters import IntegerConverter as IntegerConverter
+from .converters import PathConverter as PathConverter
+from .converters import UnicodeConverter as UnicodeConverter
+from .converters import UUIDConverter as UUIDConverter
+from .converters import ValidationError as ValidationError
+from .exceptions import BuildError as BuildError
+from .exceptions import NoMatch as NoMatch
+from .exceptions import RequestAliasRedirect as RequestAliasRedirect
+from .exceptions import RequestPath as RequestPath
+from .exceptions import RequestRedirect as RequestRedirect
+from .exceptions import RoutingException as RoutingException
+from .exceptions import WebsocketMismatch as WebsocketMismatch
+from .map import Map as Map
+from .map import MapAdapter as MapAdapter
+from .matcher import StateMachineMatcher as StateMachineMatcher
+from .rules import EndpointPrefix as EndpointPrefix
+from .rules import parse_converter_args as parse_converter_args
+from .rules import Rule as Rule
+from .rules import RuleFactory as RuleFactory
+from .rules import RuleTemplate as RuleTemplate
+from .rules import RuleTemplateFactory as RuleTemplateFactory
+from .rules import Subdomain as Subdomain
+from .rules import Submount as Submount
diff --git a/venv/lib/python3.8/site-packages/werkzeug/routing/converters.py b/venv/lib/python3.8/site-packages/werkzeug/routing/converters.py
new file mode 100644
index 0000000..6016a97
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/routing/converters.py
@@ -0,0 +1,261 @@
+from __future__ import annotations
+
+import re
+import typing as t
+import uuid
+from urllib.parse import quote
+
+if t.TYPE_CHECKING:
+ from .map import Map
+
+
class ValidationError(ValueError):
    """Raised by a converter to signal that a value does not match.

    When a rule's converter raises this exception, the rule is treated as
    not matching the current URL and matching continues with the next rule.
    """
+
+
class BaseConverter:
    """Base class for all URL converters.

    Subclasses override ``regex`` to control what text a URL part may
    contain, and ``to_python``/``to_url`` to convert between the matched
    string and a Python value.

    .. versionchanged:: 2.3
        ``part_isolating`` defaults to ``False`` if ``regex`` contains a ``/``.
    """

    regex = "[^/]+"
    weight = 100
    part_isolating = True

    def __init_subclass__(cls, **kwargs: t.Any) -> None:
        super().__init_subclass__(**kwargs)

        # A subclass that redefines ``regex`` but says nothing about
        # ``part_isolating`` is only part-isolating if its pattern can
        # never match a slash.
        overrides_regex = "regex" in cls.__dict__
        inherits_isolation = "part_isolating" not in cls.__dict__

        if overrides_regex and inherits_isolation:
            cls.part_isolating = "/" not in cls.regex

    def __init__(self, map: Map, *args: t.Any, **kwargs: t.Any) -> None:
        self.map = map

    def to_python(self, value: str) -> t.Any:
        # The matched string is passed through unchanged by default.
        return value

    def to_url(self, value: t.Any) -> str:
        # safe = https://url.spec.whatwg.org/#url-path-segment-string
        return quote(str(value), safe="!$&'()*+,/:;=@")
+
+
class UnicodeConverter(BaseConverter):
    """The default converter: accepts any string that fits in a single
    path segment, so the value can never contain a slash.

    Example::

        Rule('/pages/<page>'),
        Rule('/<string(length=2):lang_code>')

    :param map: the :class:`Map`.
    :param minlength: the minimum length of the string. Must be greater
        or equal 1.
    :param maxlength: the maximum length of the string.
    :param length: the exact length of the string.
    """

    def __init__(
        self,
        map: Map,
        minlength: int = 1,
        maxlength: int | None = None,
        length: int | None = None,
    ) -> None:
        super().__init__(map)

        # ``length`` pins an exact size; otherwise build a {min,max}
        # quantifier where an omitted max means "unbounded".
        if length is not None:
            quantifier = f"{{{int(length)}}}"
        else:
            upper = "" if maxlength is None else str(int(maxlength))
            quantifier = f"{{{int(minlength)},{upper}}}"

        self.regex = f"[^/]{quantifier}"
+
+
class AnyConverter(BaseConverter):
    """Matches exactly one of the provided items. Items can either be
    Python identifiers or strings::

        Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')

    :param map: the :class:`Map`.
    :param items: this function accepts the possible items as positional
        arguments.

    .. versionchanged:: 2.2
        Value is validated when building a URL.
    """

    def __init__(self, map: Map, *items: str) -> None:
        super().__init__(map)
        self.items = set(items)
        # Escape each item so it matches literally, then join as alternatives.
        alternatives = "|".join(re.escape(item) for item in items)
        self.regex = f"(?:{alternatives})"

    def to_url(self, value: t.Any) -> str:
        # Building with an unknown value is a programming error, not a
        # routing miss, so raise ValueError rather than ValidationError.
        if value not in self.items:
            valid_values = ", ".join(f"'{item}'" for item in sorted(self.items))
            raise ValueError(f"'{value}' is not one of {valid_values}")

        return str(value)
+
+
class PathConverter(BaseConverter):
    """Like the default :class:`UnicodeConverter`, but slashes are also
    accepted, which is handy for wiki pages and similar nested names::

        Rule('/<path:wikipage>')
        Rule('/<path:wikipage>/edit')

    :param map: the :class:`Map`.
    """

    # The regex contains ``.``, which can match ``/``, so this converter
    # must span multiple path parts.
    part_isolating = False
    regex = "[^/].*?"
    weight = 200
+
+
class NumberConverter(BaseConverter):
    """Shared behavior for :class:`IntegerConverter` and
    :class:`FloatConverter`.

    :internal:
    """

    weight = 50
    # Subclasses set this to the constructor of their numeric type.
    num_convert: t.Callable[[t.Any], t.Any] = int

    def __init__(
        self,
        map: Map,
        fixed_digits: int = 0,
        min: int | None = None,
        max: int | None = None,
        signed: bool = False,
    ) -> None:
        # Prefix the class-level regex with an optional minus sign before
        # anything else touches ``self.regex``.
        if signed:
            self.regex = self.signed_regex
        super().__init__(map)
        self.fixed_digits = fixed_digits
        self.min = min
        self.max = max
        self.signed = signed

    def to_python(self, value: str) -> t.Any:
        # A fixed-width rule only matches values of exactly that width.
        if self.fixed_digits and len(value) != self.fixed_digits:
            raise ValidationError()

        number = self.num_convert(value)
        below = self.min is not None and number < self.min
        above = self.max is not None and number > self.max

        if below or above:
            raise ValidationError()

        return number

    def to_url(self, value: t.Any) -> str:
        text = str(self.num_convert(value))
        # Left-pad with zeros when a fixed width was requested.
        return text.zfill(self.fixed_digits) if self.fixed_digits else text

    @property
    def signed_regex(self) -> str:
        return f"-?{self.regex}"
+
+
class IntegerConverter(NumberConverter):
    """Matches integer URL values::

        Rule("/page/<int:page>")

    Only unsigned, positive values are accepted unless the ``signed``
    parameter is set, which also allows negative values. ::

        Rule("/page/<int(signed=True):page>")

    :param map: The :class:`Map`.
    :param fixed_digits: The number of fixed digits in the URL. If you
        set this to ``4`` for example, the rule will only match if the
        URL looks like ``/0001/``. The default is variable length.
    :param min: The minimal value.
    :param max: The maximal value.
    :param signed: Allow signed (negative) values.

    .. versionadded:: 0.15
        The ``signed`` parameter.
    """

    regex = r"\d+"
+
+
class FloatConverter(NumberConverter):
    """Matches floating point URL values::

        Rule("/probability/<float:probability>")

    Only unsigned, positive values are accepted unless the ``signed``
    parameter is set, which also allows negative values. ::

        Rule("/offset/<float(signed=True):offset>")

    :param map: The :class:`Map`.
    :param min: The minimal value.
    :param max: The maximal value.
    :param signed: Allow signed (negative) values.

    .. versionadded:: 0.15
        The ``signed`` parameter.
    """

    regex = r"\d+\.\d+"
    num_convert = float

    def __init__(
        self,
        map: Map,
        min: float | None = None,
        max: float | None = None,
        signed: bool = False,
    ) -> None:
        # Floats have no ``fixed_digits``; forward the rest unchanged.
        super().__init__(map, min=min, max=max, signed=signed)  # type: ignore
+
+
class UUIDConverter(BaseConverter):
    """Matches UUID strings and converts them to :class:`uuid.UUID`
    objects::

        Rule('/object/<uuid:identifier>')

    .. versionadded:: 0.10

    :param map: the :class:`Map`.
    """

    regex = (
        r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
        r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"
    )

    def to_python(self, value: str) -> uuid.UUID:
        # Parse the matched hex-and-hyphen text into a real UUID object.
        return uuid.UUID(value)

    def to_url(self, value: uuid.UUID) -> str:
        # str() of a UUID is its canonical hyphenated form.
        return str(value)
+
+
#: the default converter mapping for the map.
#: "default" and "string" are aliases for the same converter.
DEFAULT_CONVERTERS: t.Mapping[str, type[BaseConverter]] = {
    "default": UnicodeConverter,
    "string": UnicodeConverter,
    "any": AnyConverter,
    "path": PathConverter,
    "int": IntegerConverter,
    "float": FloatConverter,
    "uuid": UUIDConverter,
}
diff --git a/venv/lib/python3.8/site-packages/werkzeug/routing/exceptions.py b/venv/lib/python3.8/site-packages/werkzeug/routing/exceptions.py
new file mode 100644
index 0000000..eeabd4e
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/routing/exceptions.py
@@ -0,0 +1,152 @@
+from __future__ import annotations
+
+import difflib
+import typing as t
+
+from ..exceptions import BadRequest
+from ..exceptions import HTTPException
+from ..utils import cached_property
+from ..utils import redirect
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import WSGIEnvironment
+
+ from ..wrappers.request import Request
+ from ..wrappers.response import Response
+ from .map import MapAdapter
+ from .rules import Rule
+
+
class RoutingException(Exception):
    """Base class for special exceptions raised during routing, such as
    requests for a redirect or notifications about missing URLs.

    :internal:
    """
+
+
class RequestRedirect(HTTPException, RoutingException):
    """Raised when the map wants the client redirected, for example when
    ``strict_slashes`` is enabled and a URL that requires a trailing
    slash was requested without one.

    The ``new_url`` attribute contains the absolute destination URL.
    """

    # 308 preserves the request method across the redirect.
    code = 308

    def __init__(self, new_url: str) -> None:
        super().__init__(new_url)
        self.new_url = new_url

    def get_response(
        self,
        environ: WSGIEnvironment | Request | None = None,
        scope: dict[str, t.Any] | None = None,
    ) -> Response:
        # The environ/scope arguments are ignored; the response is always
        # a plain redirect to the canonical URL.
        return redirect(self.new_url, self.code)
+
+
class RequestPath(RoutingException):
    """Internal exception carrying a rewritten path for matching.

    :internal:
    """

    __slots__ = ("path_info",)

    def __init__(self, path_info: str) -> None:
        super().__init__()
        self.path_info = path_info
+
+
class RequestAliasRedirect(RoutingException):  # noqa: B903
    """Raised when an alias rule matched and the canonical URL should be
    used instead; carries the matched values and the endpoint.
    """

    def __init__(self, matched_values: t.Mapping[str, t.Any], endpoint: t.Any) -> None:
        super().__init__()
        self.matched_values = matched_values
        self.endpoint = endpoint
+
+
class BuildError(RoutingException, LookupError):
    """Raised if the build system cannot find a URL for an endpoint with the
    values provided.

    :param endpoint: the endpoint that could not be built.
    :param values: the values that were passed for building.
    :param method: the HTTP method the build was attempted for, if any.
    :param adapter: the adapter that attempted the build; used to suggest
        a close match in the error message.
    """

    def __init__(
        self,
        endpoint: t.Any,
        values: t.Mapping[str, t.Any],
        method: str | None,
        adapter: MapAdapter | None = None,
    ) -> None:
        super().__init__(endpoint, values, method)
        self.endpoint = endpoint
        self.values = values
        self.method = method
        self.adapter = adapter

    @cached_property
    def suggested(self) -> Rule | None:
        # Lazily computed best-guess rule used to improve the message.
        return self.closest_rule(self.adapter)

    def closest_rule(self, adapter: MapAdapter | None) -> Rule | None:
        """Return the rule of ``adapter`` whose endpoint most resembles
        this error's endpoint, or ``None`` if there are no rules.
        """

        def _score_rule(rule: Rule) -> float:
            # Endpoint-name similarity dominates (weight 0.98); having a
            # compatible argument set and method only breaks ties (0.01 each).
            return sum(
                [
                    0.98
                    * difflib.SequenceMatcher(
                        # endpoints can be any type, compare as strings
                        None,
                        str(rule.endpoint),
                        str(self.endpoint),
                    ).ratio(),
                    0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
                    0.01 * bool(rule.methods and self.method in rule.methods),
                ]
            )

        if adapter and adapter.map._rules:
            return max(adapter.map._rules, key=_score_rule)

        return None

    def __str__(self) -> str:
        # Assemble the base message, then append at most one suggestion:
        # wrong method / missing values for the same endpoint, or a
        # similarly named endpoint otherwise.
        message = [f"Could not build url for endpoint {self.endpoint!r}"]
        if self.method:
            message.append(f" ({self.method!r})")
        if self.values:
            message.append(f" with values {sorted(self.values)!r}")
        message.append(".")
        if self.suggested:
            if self.endpoint == self.suggested.endpoint:
                if (
                    self.method
                    and self.suggested.methods is not None
                    and self.method not in self.suggested.methods
                ):
                    message.append(
                        " Did you mean to use methods"
                        f" {sorted(self.suggested.methods)!r}?"
                    )
                missing_values = self.suggested.arguments.union(
                    set(self.suggested.defaults or ())
                ) - set(self.values.keys())
                if missing_values:
                    message.append(
                        f" Did you forget to specify values {sorted(missing_values)!r}?"
                    )
            else:
                message.append(f" Did you mean {self.suggested.endpoint!r} instead?")
        return "".join(message)
+
+
class WebsocketMismatch(BadRequest):
    """Raised when the only matching rule and the request disagree about
    WebSockets: the rule is a WebSocket rule but the request is plain
    HTTP, or vice versa.
    """
+
+
class NoMatch(Exception):
    """Internal signal that matching failed, carrying what partially
    matched so the caller can pick the right HTTP error.
    """

    __slots__ = ("have_match_for", "websocket_mismatch")

    def __init__(self, have_match_for: set[str], websocket_mismatch: bool) -> None:
        # Methods that would have matched at this path, if any.
        self.have_match_for = have_match_for
        # True if the only failure was an HTTP/WebSocket rule mismatch.
        self.websocket_mismatch = websocket_mismatch
diff --git a/venv/lib/python3.8/site-packages/werkzeug/routing/map.py b/venv/lib/python3.8/site-packages/werkzeug/routing/map.py
new file mode 100644
index 0000000..4d15e88
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/routing/map.py
@@ -0,0 +1,951 @@
+from __future__ import annotations
+
+import typing as t
+import warnings
+from pprint import pformat
+from threading import Lock
+from urllib.parse import quote
+from urllib.parse import urljoin
+from urllib.parse import urlunsplit
+
+from .._internal import _get_environ
+from .._internal import _wsgi_decoding_dance
+from ..datastructures import ImmutableDict
+from ..datastructures import MultiDict
+from ..exceptions import BadHost
+from ..exceptions import HTTPException
+from ..exceptions import MethodNotAllowed
+from ..exceptions import NotFound
+from ..urls import _urlencode
+from ..wsgi import get_host
+from .converters import DEFAULT_CONVERTERS
+from .exceptions import BuildError
+from .exceptions import NoMatch
+from .exceptions import RequestAliasRedirect
+from .exceptions import RequestPath
+from .exceptions import RequestRedirect
+from .exceptions import WebsocketMismatch
+from .matcher import StateMachineMatcher
+from .rules import _simple_rule_re
+from .rules import Rule
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+ from ..wrappers.request import Request
+ from .converters import BaseConverter
+ from .rules import RuleFactory
+
+
class Map:
    """The map class stores all the URL rules and some configuration
    parameters. Some of the configuration values are only stored on the
    `Map` instance since those affect all rules, others are just defaults
    and can be overridden for each rule. Note that you have to specify all
    arguments besides the `rules` as keyword arguments!

    :param rules: sequence of url rules for this map.
    :param default_subdomain: The default subdomain for rules without a
        subdomain defined.
    :param strict_slashes: If a rule ends with a slash but the matched
        URL does not, redirect to the URL with a trailing slash.
    :param merge_slashes: Merge consecutive slashes when matching or
        building URLs. Matches will redirect to the normalized URL.
        Slashes in variable parts are not merged.
    :param redirect_defaults: This will redirect to the default rule if it
        wasn't visited that way. This helps creating
        unique URLs.
    :param converters: A dict of converters that adds additional converters
        to the list of converters. If you redefine one
        converter this will override the original one.
    :param sort_parameters: If set to `True` the url parameters are sorted.
        See `url_encode` for more details.
    :param sort_key: The sort key function for `url_encode`.
    :param host_matching: if set to `True` it enables the host matching
        feature and disables the subdomain one. If
        enabled the `host` parameter to rules is used
        instead of the `subdomain` one.

    .. versionchanged:: 3.0
        The ``charset`` and ``encoding_errors`` parameters were removed.

    .. versionchanged:: 1.0
        If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules will match.

    .. versionchanged:: 1.0
        The ``merge_slashes`` parameter was added.

    .. versionchanged:: 0.7
        The ``encoding_errors`` and ``host_matching`` parameters were added.

    .. versionchanged:: 0.5
        The ``sort_parameters`` and ``sort_key`` parameters were added.
    """

    #: A dict of default converters to be used.
    default_converters = ImmutableDict(DEFAULT_CONVERTERS)

    #: The type of lock to use when updating.
    #:
    #: .. versionadded:: 1.0
    lock_class = Lock

    def __init__(
        self,
        rules: t.Iterable[RuleFactory] | None = None,
        default_subdomain: str = "",
        strict_slashes: bool = True,
        merge_slashes: bool = True,
        redirect_defaults: bool = True,
        converters: t.Mapping[str, type[BaseConverter]] | None = None,
        sort_parameters: bool = False,
        sort_key: t.Callable[[t.Any], t.Any] | None = None,
        host_matching: bool = False,
    ) -> None:
        # The matcher owns the matchable rules; ``merge_slashes`` lives on
        # it rather than on the map (see the property below).
        self._matcher = StateMachineMatcher(merge_slashes)
        self._rules_by_endpoint: dict[t.Any, list[Rule]] = {}
        # ``_remap`` marks the compiled state as stale; ``update()`` uses
        # ``_remap_lock`` so concurrent callers only re-sort once.
        self._remap = True
        self._remap_lock = self.lock_class()

        self.default_subdomain = default_subdomain
        self.strict_slashes = strict_slashes
        self.redirect_defaults = redirect_defaults
        self.host_matching = host_matching

        # Start from the defaults; user-supplied converters override them.
        self.converters = self.default_converters.copy()
        if converters:
            self.converters.update(converters)

        self.sort_parameters = sort_parameters
        self.sort_key = sort_key

        for rulefactory in rules or ():
            self.add(rulefactory)

    @property
    def merge_slashes(self) -> bool:
        # Delegated to the matcher, which applies it while matching.
        return self._matcher.merge_slashes

    @merge_slashes.setter
    def merge_slashes(self, value: bool) -> None:
        self._matcher.merge_slashes = value

    def is_endpoint_expecting(self, endpoint: t.Any, *arguments: str) -> bool:
        """Iterate over all rules and check if the endpoint expects
        the arguments provided. This is for example useful if you have
        some URLs that expect a language code and others that do not and
        you want to wrap the builder a bit so that the current language
        code is automatically added if not provided but endpoints expect
        it.

        :param endpoint: the endpoint to check.
        :param arguments: this function accepts one or more arguments
                          as positional arguments. Each one of them is
                          checked.
        """
        self.update()
        arguments_set = set(arguments)
        for rule in self._rules_by_endpoint[endpoint]:
            if arguments_set.issubset(rule.arguments):
                return True
        return False

    @property
    def _rules(self) -> list[Rule]:
        # Flattened view of every rule, including build-only ones.
        return [rule for rules in self._rules_by_endpoint.values() for rule in rules]

    def iter_rules(self, endpoint: t.Any | None = None) -> t.Iterator[Rule]:
        """Iterate over all rules or the rules of an endpoint.

        :param endpoint: if provided only the rules for that endpoint
                         are returned.
        :return: an iterator
        """
        self.update()
        if endpoint is not None:
            return iter(self._rules_by_endpoint[endpoint])
        return iter(self._rules)

    def add(self, rulefactory: RuleFactory) -> None:
        """Add a new rule or factory to the map and bind it. Requires that the
        rule is not bound to another map.

        :param rulefactory: a :class:`Rule` or :class:`RuleFactory`
        """
        for rule in rulefactory.get_rules(self):
            rule.bind(self)
            # Build-only rules never participate in matching.
            if not rule.build_only:
                self._matcher.add(rule)
            self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
        self._remap = True

    def bind(
        self,
        server_name: str,
        script_name: str | None = None,
        subdomain: str | None = None,
        url_scheme: str = "http",
        default_method: str = "GET",
        path_info: str | None = None,
        query_args: t.Mapping[str, t.Any] | str | None = None,
    ) -> MapAdapter:
        """Return a new :class:`MapAdapter` with the details specified to the
        call. Note that `script_name` will default to ``'/'`` if not further
        specified or `None`. The `server_name` at least is a requirement
        because the HTTP RFC requires absolute URLs for redirects and so all
        redirect exceptions raised by Werkzeug will contain the full canonical
        URL.

        If no path_info is passed to :meth:`match` it will use the default path
        info passed to bind. While this doesn't really make sense for
        manual bind calls, it's useful if you bind a map to a WSGI
        environment which already contains the path info.

        `subdomain` will default to the `default_subdomain` for this map if
        no defined. If there is no `default_subdomain` you cannot use the
        subdomain feature.

        .. versionchanged:: 1.0
            If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
            will match.

        .. versionchanged:: 0.15
            ``path_info`` defaults to ``'/'`` if ``None``.

        .. versionchanged:: 0.8
            ``query_args`` can be a string.

        .. versionchanged:: 0.7
            Added ``query_args``.
        """
        server_name = server_name.lower()
        if self.host_matching:
            if subdomain is not None:
                raise RuntimeError("host matching enabled and a subdomain was provided")
        elif subdomain is None:
            subdomain = self.default_subdomain
        if script_name is None:
            script_name = "/"
        if path_info is None:
            path_info = "/"

        # Port isn't part of IDNA, and might push a name over the 63 octet limit.
        server_name, port_sep, port = server_name.partition(":")

        try:
            server_name = server_name.encode("idna").decode("ascii")
        except UnicodeError as e:
            raise BadHost() from e

        return MapAdapter(
            self,
            f"{server_name}{port_sep}{port}",
            script_name,
            subdomain,
            url_scheme,
            path_info,
            default_method,
            query_args,
        )

    def bind_to_environ(
        self,
        environ: WSGIEnvironment | Request,
        server_name: str | None = None,
        subdomain: str | None = None,
    ) -> MapAdapter:
        """Like :meth:`bind` but you can pass it an WSGI environment and it
        will fetch the information from that dictionary. Note that because of
        limitations in the protocol there is no way to get the current
        subdomain and real `server_name` from the environment. If you don't
        provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
        `HTTP_HOST` if provided) as used `server_name` with disabled subdomain
        feature.

        If `subdomain` is `None` but an environment and a server name is
        provided it will calculate the current subdomain automatically.
        Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
        in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
        subdomain will be ``'staging.dev'``.

        If the object passed as environ has an environ attribute, the value of
        this attribute is used instead. This allows you to pass request
        objects. Additionally `PATH_INFO` added as a default of the
        :class:`MapAdapter` so that you don't have to pass the path info to
        the match method.

        .. versionchanged:: 1.0.0
            If the passed server name specifies port 443, it will match
            if the incoming scheme is ``https`` without a port.

        .. versionchanged:: 1.0.0
            A warning is shown when the passed server name does not
            match the incoming WSGI server name.

        .. versionchanged:: 0.8
            This will no longer raise a ValueError when an unexpected server
            name was passed.

        .. versionchanged:: 0.5
            previously this method accepted a bogus `calculate_subdomain`
            parameter that did not have any effect. It was removed because
            of that.

        :param environ: a WSGI environment.
        :param server_name: an optional server name hint (see above).
        :param subdomain: optionally the current subdomain (see above).
        """
        env = _get_environ(environ)
        wsgi_server_name = get_host(env).lower()
        scheme = env["wsgi.url_scheme"]
        # Detect a WebSocket handshake so ws/wss rules can match.
        upgrade = any(
            v.strip() == "upgrade"
            for v in env.get("HTTP_CONNECTION", "").lower().split(",")
        )

        if upgrade and env.get("HTTP_UPGRADE", "").lower() == "websocket":
            scheme = "wss" if scheme == "https" else "ws"

        if server_name is None:
            server_name = wsgi_server_name
        else:
            server_name = server_name.lower()

            # strip standard port to match get_host()
            if scheme in {"http", "ws"} and server_name.endswith(":80"):
                server_name = server_name[:-3]
            elif scheme in {"https", "wss"} and server_name.endswith(":443"):
                server_name = server_name[:-4]

        if subdomain is None and not self.host_matching:
            cur_server_name = wsgi_server_name.split(".")
            real_server_name = server_name.split(".")
            offset = -len(real_server_name)

            if cur_server_name[offset:] != real_server_name:
                # This can happen even with valid configs if the server was
                # accessed directly by IP address under some situations.
                # Instead of raising an exception like in Werkzeug 0.7 or
                # earlier we go by an invalid subdomain which will result
                # in a 404 error on matching.
                warnings.warn(
                    f"Current server name {wsgi_server_name!r} doesn't match configured"
                    f" server name {server_name!r}",
                    stacklevel=2,
                )
                subdomain = "<invalid>"
            else:
                subdomain = ".".join(filter(None, cur_server_name[:offset]))

        def _get_wsgi_string(name: str) -> str | None:
            # WSGI strings are latin-1 decoded; re-decode them properly.
            val = env.get(name)
            if val is not None:
                return _wsgi_decoding_dance(val)
            return None

        script_name = _get_wsgi_string("SCRIPT_NAME")
        path_info = _get_wsgi_string("PATH_INFO")
        query_args = _get_wsgi_string("QUERY_STRING")
        return Map.bind(
            self,
            server_name,
            script_name,
            subdomain,
            scheme,
            env["REQUEST_METHOD"],
            path_info,
            query_args=query_args,
        )

    def update(self) -> None:
        """Called before matching and building to keep the compiled rules
        in the correct order after things changed.
        """
        if not self._remap:
            return

        # Re-check under the lock so concurrent callers don't remap twice.
        with self._remap_lock:
            if not self._remap:
                return

            self._matcher.update()
            for rules in self._rules_by_endpoint.values():
                rules.sort(key=lambda x: x.build_compare_key())
            self._remap = False

    def __repr__(self) -> str:
        rules = self.iter_rules()
        return f"{type(self).__name__}({pformat(list(rules))})"
+
+
+class MapAdapter:
+ """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
+ the URL matching and building based on runtime information.
+ """
+
    def __init__(
        self,
        map: Map,
        server_name: str,
        script_name: str,
        subdomain: str | None,
        url_scheme: str,
        path_info: str,
        default_method: str,
        query_args: t.Mapping[str, t.Any] | str | None = None,
    ):
        self.map = map
        self.server_name = server_name

        # Normalize so joining the script root with a path never produces
        # a missing separator.
        if not script_name.endswith("/"):
            script_name += "/"

        self.script_name = script_name
        self.subdomain = subdomain
        self.url_scheme = url_scheme
        self.path_info = path_info
        self.default_method = default_method
        self.query_args = query_args
        # A ws/wss bind matches only WebSocket rules.
        self.websocket = self.url_scheme in {"ws", "wss"}
+
    def dispatch(
        self,
        view_func: t.Callable[[str, t.Mapping[str, t.Any]], WSGIApplication],
        path_info: str | None = None,
        method: str | None = None,
        catch_http_exceptions: bool = False,
    ) -> WSGIApplication:
        """Does the complete dispatching process. `view_func` is called with
        the endpoint and a dict with the values for the view. It should
        look up the view function, call it, and return a response object
        or WSGI application. http exceptions are not caught by default
        so that applications can display nicer error messages by just
        catching them by hand. If you want to stick with the default
        error messages you can pass it ``catch_http_exceptions=True`` and
        it will catch the http exceptions.

        Here a small example for the dispatch usage::

            from werkzeug.wrappers import Request, Response
            from werkzeug.wsgi import responder
            from werkzeug.routing import Map, Rule

            def on_index(request):
                return Response('Hello from the index')

            url_map = Map([Rule('/', endpoint='index')])
            views = {'index': on_index}

            @responder
            def application(environ, start_response):
                request = Request(environ)
                urls = url_map.bind_to_environ(environ)
                return urls.dispatch(lambda e, v: views[e](request, **v),
                                     catch_http_exceptions=True)

        Keep in mind that this method might return exception objects, too, so
        use :class:`Response.force_type` to get a response object.

        :param view_func: a function that is called with the endpoint as
                          first argument and the value dict as second. Has
                          to dispatch to the actual view function with this
                          information. (see above)
        :param path_info: the path info to use for matching. Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching. Overrides the
                       method specified on binding.
        :param catch_http_exceptions: set to `True` to catch any of the
                                      werkzeug :class:`HTTPException`\\s.
        """
        try:
            try:
                endpoint, args = self.match(path_info, method)
            except RequestRedirect as e:
                # Redirects are returned as the response, never re-raised.
                return e
            return view_func(endpoint, args)
        except HTTPException as e:
            if catch_http_exceptions:
                return e
            raise
+
    # Typing overloads only: ``match`` returns ``(endpoint, args)`` by
    # default and ``(rule, args)`` when ``return_rule=True``.
    @t.overload
    def match(
        self,
        path_info: str | None = None,
        method: str | None = None,
        return_rule: t.Literal[False] = False,
        query_args: t.Mapping[str, t.Any] | str | None = None,
        websocket: bool | None = None,
    ) -> tuple[t.Any, t.Mapping[str, t.Any]]: ...

    @t.overload
    def match(
        self,
        path_info: str | None = None,
        method: str | None = None,
        return_rule: t.Literal[True] = True,
        query_args: t.Mapping[str, t.Any] | str | None = None,
        websocket: bool | None = None,
    ) -> tuple[Rule, t.Mapping[str, t.Any]]: ...
+
+ def match(
+ self,
+ path_info: str | None = None,
+ method: str | None = None,
+ return_rule: bool = False,
+ query_args: t.Mapping[str, t.Any] | str | None = None,
+ websocket: bool | None = None,
+ ) -> tuple[t.Any | Rule, t.Mapping[str, t.Any]]:
+ """The usage is simple: you just pass the match method the current
+ path info as well as the method (which defaults to `GET`). The
+ following things can then happen:
+
+ - you receive a `NotFound` exception that indicates that no URL is
+ matching. A `NotFound` exception is also a WSGI application you
+ can call to get a default page not found page (happens to be the
+ same object as `werkzeug.exceptions.NotFound`)
+
+ - you receive a `MethodNotAllowed` exception that indicates that there
+ is a match for this URL but not for the current request method.
+ This is useful for RESTful applications.
+
+ - you receive a `RequestRedirect` exception with a `new_url`
+ attribute. This exception is used to notify you about a request
+ Werkzeug requests from your WSGI application. This is for example the
+ case if you request ``/foo`` although the correct URL is ``/foo/``
+ You can use the `RequestRedirect` instance as response-like object
+ similar to all other subclasses of `HTTPException`.
+
+ - you receive a ``WebsocketMismatch`` exception if the only
+ match is a WebSocket rule but the bind is an HTTP request, or
+ if the match is an HTTP rule but the bind is a WebSocket
+ request.
+
+ - you get a tuple in the form ``(endpoint, arguments)`` if there is
+ a match (unless `return_rule` is True, in which case you get a tuple
+ in the form ``(rule, arguments)``)
+
+ If the path info is not passed to the match method the default path
+ info of the map is used (defaults to the root URL if not defined
+ explicitly).
+
+ All of the exceptions raised are subclasses of `HTTPException` so they
+ can be used as WSGI responses. They will all render generic error or
+ redirect pages.
+
+ Here is a small example for matching:
+
+ >>> m = Map([
+ ... Rule('/', endpoint='index'),
+ ... Rule('/downloads/', endpoint='downloads/index'),
+ ... Rule('/downloads/<int:id>', endpoint='downloads/show')
+ ... ])
+ >>> urls = m.bind("example.com", "/")
+ >>> urls.match("/", "GET")
+ ('index', {})
+ >>> urls.match("/downloads/42")
+ ('downloads/show', {'id': 42})
+
+ And here is what happens on redirect and missing URLs:
+
+ >>> urls.match("/downloads")
+ Traceback (most recent call last):
+ ...
+ RequestRedirect: http://example.com/downloads/
+ >>> urls.match("/missing")
+ Traceback (most recent call last):
+ ...
+ NotFound: 404 Not Found
+
+ :param path_info: the path info to use for matching. Overrides the
+ path info specified on binding.
+ :param method: the HTTP method used for matching. Overrides the
+ method specified on binding.
+ :param return_rule: return the rule that matched instead of just the
+ endpoint (defaults to `False`).
+ :param query_args: optional query arguments that are used for
+ automatic redirects as string or dictionary. It's
+ currently not possible to use the query arguments
+ for URL matching.
+ :param websocket: Match WebSocket instead of HTTP requests. A
+ websocket request has a ``ws`` or ``wss``
+ :attr:`url_scheme`. This overrides that detection.
+
+ .. versionadded:: 1.0
+ Added ``websocket``.
+
+ .. versionchanged:: 0.8
+ ``query_args`` can be a string.
+
+ .. versionadded:: 0.7
+ Added ``query_args``.
+
+ .. versionadded:: 0.6
+ Added ``return_rule``.
+ """
+ self.map.update()
+ if path_info is None:
+ path_info = self.path_info
+ if query_args is None:
+ query_args = self.query_args or {}
+ method = (method or self.default_method).upper()
+
+ if websocket is None:
+ websocket = self.websocket
+
+ domain_part = self.server_name
+
+ if not self.map.host_matching and self.subdomain is not None:
+ domain_part = self.subdomain
+
+ path_part = f"/{path_info.lstrip('/')}" if path_info else ""
+
+ try:
+ result = self.map._matcher.match(domain_part, path_part, method, websocket)
+ except RequestPath as e:
+ # safe = https://url.spec.whatwg.org/#url-path-segment-string
+ new_path = quote(e.path_info, safe="!$&'()*+,/:;=@")
+ raise RequestRedirect(
+ self.make_redirect_url(new_path, query_args)
+ ) from None
+ except RequestAliasRedirect as e:
+ raise RequestRedirect(
+ self.make_alias_redirect_url(
+ f"{domain_part}|{path_part}",
+ e.endpoint,
+ e.matched_values,
+ method,
+ query_args,
+ )
+ ) from None
+ except NoMatch as e:
+ if e.have_match_for:
+ raise MethodNotAllowed(valid_methods=list(e.have_match_for)) from None
+
+ if e.websocket_mismatch:
+ raise WebsocketMismatch() from None
+
+ raise NotFound() from None
+ else:
+ rule, rv = result
+
+ if self.map.redirect_defaults:
+ redirect_url = self.get_default_redirect(rule, method, rv, query_args)
+ if redirect_url is not None:
+ raise RequestRedirect(redirect_url)
+
+ if rule.redirect_to is not None:
+ if isinstance(rule.redirect_to, str):
+
+ def _handle_match(match: t.Match[str]) -> str:
+ value = rv[match.group(1)]
+ return rule._converters[match.group(1)].to_url(value)
+
+ redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to)
+ else:
+ redirect_url = rule.redirect_to(self, **rv)
+
+ if self.subdomain:
+ netloc = f"{self.subdomain}.{self.server_name}"
+ else:
+ netloc = self.server_name
+
+ raise RequestRedirect(
+ urljoin(
+ f"{self.url_scheme or 'http'}://{netloc}{self.script_name}",
+ redirect_url,
+ )
+ )
+
+ if return_rule:
+ return rule, rv
+ else:
+ return rule.endpoint, rv
+
+ def test(self, path_info: str | None = None, method: str | None = None) -> bool:
+ """Test if a rule would match. Works like `match` but returns `True`
+ if the URL matches, or `False` if it does not exist.
+
+ :param path_info: the path info to use for matching. Overrides the
+ path info specified on binding.
+ :param method: the HTTP method used for matching. Overrides the
+ method specified on binding.
+ """
+ try:
+ self.match(path_info, method)
+ except RequestRedirect:
+ pass
+ except HTTPException:
+ return False
+ return True
+
+ def allowed_methods(self, path_info: str | None = None) -> t.Iterable[str]:
+ """Returns the valid methods that match for a given path.
+
+ .. versionadded:: 0.7
+ """
+ try:
+ self.match(path_info, method="--")
+ except MethodNotAllowed as e:
+ return e.valid_methods # type: ignore
+ except HTTPException:
+ pass
+ return []
+
+ def get_host(self, domain_part: str | None) -> str:
+ """Figures out the full host name for the given domain part. The
+ domain part is a subdomain in case host matching is disabled or
+ a full host name.
+ """
+ if self.map.host_matching:
+ if domain_part is None:
+ return self.server_name
+
+ return domain_part
+
+ if domain_part is None:
+ subdomain = self.subdomain
+ else:
+ subdomain = domain_part
+
+ if subdomain:
+ return f"{subdomain}.{self.server_name}"
+ else:
+ return self.server_name
+
+ def get_default_redirect(
+ self,
+ rule: Rule,
+ method: str,
+ values: t.MutableMapping[str, t.Any],
+ query_args: t.Mapping[str, t.Any] | str,
+ ) -> str | None:
+ """A helper that returns the URL to redirect to if it finds one.
+ This is used for default redirecting only.
+
+ :internal:
+ """
+ assert self.map.redirect_defaults
+ for r in self.map._rules_by_endpoint[rule.endpoint]:
+ # every rule that comes after this one, including ourself
+ # has a lower priority for the defaults. We order the ones
+ # with the highest priority up for building.
+ if r is rule:
+ break
+ if r.provides_defaults_for(rule) and r.suitable_for(values, method):
+ values.update(r.defaults) # type: ignore
+ domain_part, path = r.build(values) # type: ignore
+ return self.make_redirect_url(path, query_args, domain_part=domain_part)
+ return None
+
+ def encode_query_args(self, query_args: t.Mapping[str, t.Any] | str) -> str:
+ if not isinstance(query_args, str):
+ return _urlencode(query_args)
+ return query_args
+
+ def make_redirect_url(
+ self,
+ path_info: str,
+ query_args: t.Mapping[str, t.Any] | str | None = None,
+ domain_part: str | None = None,
+ ) -> str:
+ """Creates a redirect URL.
+
+ :internal:
+ """
+ if query_args is None:
+ query_args = self.query_args
+
+ if query_args:
+ query_str = self.encode_query_args(query_args)
+ else:
+ query_str = None
+
+ scheme = self.url_scheme or "http"
+ host = self.get_host(domain_part)
+ path = "/".join((self.script_name.strip("/"), path_info.lstrip("/")))
+ return urlunsplit((scheme, host, path, query_str, None))
+
+ def make_alias_redirect_url(
+ self,
+ path: str,
+ endpoint: t.Any,
+ values: t.Mapping[str, t.Any],
+ method: str,
+ query_args: t.Mapping[str, t.Any] | str,
+ ) -> str:
+ """Internally called to make an alias redirect URL."""
+ url = self.build(
+ endpoint, values, method, append_unknown=False, force_external=True
+ )
+ if query_args:
+ url += f"?{self.encode_query_args(query_args)}"
+ assert url != path, "detected invalid alias setting. No canonical URL found"
+ return url
+
+ def _partial_build(
+ self,
+ endpoint: t.Any,
+ values: t.Mapping[str, t.Any],
+ method: str | None,
+ append_unknown: bool,
+ ) -> tuple[str, str, bool] | None:
+ """Helper for :meth:`build`. Returns subdomain and path for the
+ rule that accepts this endpoint, values and method.
+
+ :internal:
+ """
+ # in case the method is none, try with the default method first
+ if method is None:
+ rv = self._partial_build(
+ endpoint, values, self.default_method, append_unknown
+ )
+ if rv is not None:
+ return rv
+
+ # Default method did not match or a specific method is passed.
+ # Check all for first match with matching host. If no matching
+ # host is found, go with first result.
+ first_match = None
+
+ for rule in self.map._rules_by_endpoint.get(endpoint, ()):
+ if rule.suitable_for(values, method):
+ build_rv = rule.build(values, append_unknown)
+
+ if build_rv is not None:
+ rv = (build_rv[0], build_rv[1], rule.websocket)
+ if self.map.host_matching:
+ if rv[0] == self.server_name:
+ return rv
+ elif first_match is None:
+ first_match = rv
+ else:
+ return rv
+
+ return first_match
+
+ def build(
+ self,
+ endpoint: t.Any,
+ values: t.Mapping[str, t.Any] | None = None,
+ method: str | None = None,
+ force_external: bool = False,
+ append_unknown: bool = True,
+ url_scheme: str | None = None,
+ ) -> str:
+ """Building URLs works pretty much the other way round. Instead of
+ `match` you call `build` and pass it the endpoint and a dict of
+ arguments for the placeholders.
+
+ The `build` function also accepts an argument called `force_external`
+ which, if you set it to `True` will force external URLs. Per default
+ external URLs (include the server name) will only be used if the
+ target URL is on a different subdomain.
+
+ >>> m = Map([
+ ... Rule('/', endpoint='index'),
+ ... Rule('/downloads/', endpoint='downloads/index'),
+ ... Rule('/downloads/<int:id>', endpoint='downloads/show')
+ ... ])
+ >>> urls = m.bind("example.com", "/")
+ >>> urls.build("index", {})
+ '/'
+ >>> urls.build("downloads/show", {'id': 42})
+ '/downloads/42'
+ >>> urls.build("downloads/show", {'id': 42}, force_external=True)
+ 'http://example.com/downloads/42'
+
+ Because URLs cannot contain non ASCII data you will always get
+ bytes back. Non ASCII characters are urlencoded with the
+ charset defined on the map instance.
+
+ Additional values are converted to strings and appended to the URL as
+ URL querystring parameters:
+
+ >>> urls.build("index", {'q': 'My Searchstring'})
+ '/?q=My+Searchstring'
+
+ When processing those additional values, lists are furthermore
+ interpreted as multiple values (as per
+ :py:class:`werkzeug.datastructures.MultiDict`):
+
+ >>> urls.build("index", {'q': ['a', 'b', 'c']})
+ '/?q=a&q=b&q=c'
+
+ Passing a ``MultiDict`` will also add multiple values:
+
+ >>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
+ '/?p=z&q=a&q=b'
+
+ If a rule does not exist when building a `BuildError` exception is
+ raised.
+
+ The build method accepts an argument called `method` which allows you
+ to specify the method you want to have an URL built for if you have
+ different methods for the same endpoint specified.
+
+ :param endpoint: the endpoint of the URL to build.
+ :param values: the values for the URL to build. Unhandled values are
+ appended to the URL as query parameters.
+ :param method: the HTTP method for the rule if there are different
+ URLs for different methods on the same endpoint.
+ :param force_external: enforce full canonical external URLs. If the URL
+ scheme is not provided, this will generate
+ a protocol-relative URL.
+ :param append_unknown: unknown parameters are appended to the generated
+ URL as query string argument. Disable this
+ if you want the builder to ignore those.
+ :param url_scheme: Scheme to use in place of the bound
+ :attr:`url_scheme`.
+
+ .. versionchanged:: 2.0
+ Added the ``url_scheme`` parameter.
+
+ .. versionadded:: 0.6
+ Added the ``append_unknown`` parameter.
+ """
+ self.map.update()
+
+ if values:
+ if isinstance(values, MultiDict):
+ values = {
+ k: (v[0] if len(v) == 1 else v)
+ for k, v in dict.items(values)
+ if len(v) != 0
+ }
+ else: # plain dict
+ values = {k: v for k, v in values.items() if v is not None}
+ else:
+ values = {}
+
+ rv = self._partial_build(endpoint, values, method, append_unknown)
+ if rv is None:
+ raise BuildError(endpoint, values, method, self)
+
+ domain_part, path, websocket = rv
+ host = self.get_host(domain_part)
+
+ if url_scheme is None:
+ url_scheme = self.url_scheme
+
+ # Always build WebSocket routes with the scheme (browsers
+ # require full URLs). If bound to a WebSocket, ensure that HTTP
+ # routes are built with an HTTP scheme.
+ secure = url_scheme in {"https", "wss"}
+
+ if websocket:
+ force_external = True
+ url_scheme = "wss" if secure else "ws"
+ elif url_scheme:
+ url_scheme = "https" if secure else "http"
+
+ # shortcut this.
+ if not force_external and (
+ (self.map.host_matching and host == self.server_name)
+ or (not self.map.host_matching and domain_part == self.subdomain)
+ ):
+ return f"{self.script_name.rstrip('/')}/{path.lstrip('/')}"
+
+ scheme = f"{url_scheme}:" if url_scheme else ""
+ return f"{scheme}//{host}{self.script_name[:-1]}/{path.lstrip('/')}"
diff --git a/venv/lib/python3.8/site-packages/werkzeug/routing/matcher.py b/venv/lib/python3.8/site-packages/werkzeug/routing/matcher.py
new file mode 100644
index 0000000..1fd00ef
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/routing/matcher.py
@@ -0,0 +1,202 @@
+from __future__ import annotations
+
+import re
+import typing as t
+from dataclasses import dataclass
+from dataclasses import field
+
+from .converters import ValidationError
+from .exceptions import NoMatch
+from .exceptions import RequestAliasRedirect
+from .exceptions import RequestPath
+from .rules import Rule
+from .rules import RulePart
+
+
+class SlashRequired(Exception):
+ pass
+
+
+@dataclass
+class State:
+ """A representation of a rule state.
+
+ This includes the *rules* that correspond to the state and the
+ possible *static* and *dynamic* transitions to the next state.
+ """
+
+ dynamic: list[tuple[RulePart, State]] = field(default_factory=list)
+ rules: list[Rule] = field(default_factory=list)
+ static: dict[str, State] = field(default_factory=dict)
+
+
+class StateMachineMatcher:
+ def __init__(self, merge_slashes: bool) -> None:
+ self._root = State()
+ self.merge_slashes = merge_slashes
+
+ def add(self, rule: Rule) -> None:
+ state = self._root
+ for part in rule._parts:
+ if part.static:
+ state.static.setdefault(part.content, State())
+ state = state.static[part.content]
+ else:
+ for test_part, new_state in state.dynamic:
+ if test_part == part:
+ state = new_state
+ break
+ else:
+ new_state = State()
+ state.dynamic.append((part, new_state))
+ state = new_state
+ state.rules.append(rule)
+
+ def update(self) -> None:
+ # For every state the dynamic transitions should be sorted by
+ # the weight of the transition
+ state = self._root
+
+ def _update_state(state: State) -> None:
+ state.dynamic.sort(key=lambda entry: entry[0].weight)
+ for new_state in state.static.values():
+ _update_state(new_state)
+ for _, new_state in state.dynamic:
+ _update_state(new_state)
+
+ _update_state(state)
+
+ def match(
+ self, domain: str, path: str, method: str, websocket: bool
+ ) -> tuple[Rule, t.MutableMapping[str, t.Any]]:
+ # To match to a rule we need to start at the root state and
+ # try to follow the transitions until we find a match, or find
+ # there is no transition to follow.
+
+ have_match_for = set()
+ websocket_mismatch = False
+
+ def _match(
+ state: State, parts: list[str], values: list[str]
+ ) -> tuple[Rule, list[str]] | None:
+ # This function is meant to be called recursively, and will attempt
+ # to match the head part to the state's transitions.
+ nonlocal have_match_for, websocket_mismatch
+
+ # The base case is when all parts have been matched via
+ # transitions. Hence if there is a rule with methods &
+ # websocket that work return it and the dynamic values
+ # extracted.
+ if parts == []:
+ for rule in state.rules:
+ if rule.methods is not None and method not in rule.methods:
+ have_match_for.update(rule.methods)
+ elif rule.websocket != websocket:
+ websocket_mismatch = True
+ else:
+ return rule, values
+
+ # Test if there is a match with this path with a
+ # trailing slash, if so raise an exception to report
+ # that matching is possible with an additional slash
+ if "" in state.static:
+ for rule in state.static[""].rules:
+ if websocket == rule.websocket and (
+ rule.methods is None or method in rule.methods
+ ):
+ if rule.strict_slashes:
+ raise SlashRequired()
+ else:
+ return rule, values
+ return None
+
+ part = parts[0]
+ # To match this part try the static transitions first
+ if part in state.static:
+ rv = _match(state.static[part], parts[1:], values)
+ if rv is not None:
+ return rv
+ # No match via the static transitions, so try the dynamic
+ # ones.
+ for test_part, new_state in state.dynamic:
+ target = part
+ remaining = parts[1:]
+ # A final part indicates a transition that always
+ # consumes the remaining parts i.e. transitions to a
+ # final state.
+ if test_part.final:
+ target = "/".join(parts)
+ remaining = []
+ match = re.compile(test_part.content).match(target)
+ if match is not None:
+ if test_part.suffixed:
+ # If a part_isolating=False part has a slash suffix, remove the
+ # suffix from the match and check for the slash redirect next.
+ suffix = match.groups()[-1]
+ if suffix == "/":
+ remaining = [""]
+
+ converter_groups = sorted(
+ match.groupdict().items(), key=lambda entry: entry[0]
+ )
+ groups = [
+ value
+ for key, value in converter_groups
+ if key[:11] == "__werkzeug_"
+ ]
+ rv = _match(new_state, remaining, values + groups)
+ if rv is not None:
+ return rv
+
+ # If there is no match and the only part left is a
+ # trailing slash ("") consider rules that aren't
+ # strict-slashes as these should match if there is a final
+ # slash part.
+ if parts == [""]:
+ for rule in state.rules:
+ if rule.strict_slashes:
+ continue
+ if rule.methods is not None and method not in rule.methods:
+ have_match_for.update(rule.methods)
+ elif rule.websocket != websocket:
+ websocket_mismatch = True
+ else:
+ return rule, values
+
+ return None
+
+ try:
+ rv = _match(self._root, [domain, *path.split("/")], [])
+ except SlashRequired:
+ raise RequestPath(f"{path}/") from None
+
+ if self.merge_slashes and rv is None:
+ # Try to match again, but with slashes merged
+ path = re.sub("/{2,}?", "/", path)
+ try:
+ rv = _match(self._root, [domain, *path.split("/")], [])
+ except SlashRequired:
+ raise RequestPath(f"{path}/") from None
+ if rv is None or rv[0].merge_slashes is False:
+ raise NoMatch(have_match_for, websocket_mismatch)
+ else:
+ raise RequestPath(f"{path}")
+ elif rv is not None:
+ rule, values = rv
+
+ result = {}
+ for name, value in zip(rule._converters.keys(), values):
+ try:
+ value = rule._converters[name].to_python(value)
+ except ValidationError:
+ raise NoMatch(have_match_for, websocket_mismatch) from None
+ result[str(name)] = value
+ if rule.defaults:
+ result.update(rule.defaults)
+
+ if rule.alias and rule.map.redirect_defaults:
+ raise RequestAliasRedirect(result, rule.endpoint)
+
+ return rule, result
+
+ raise NoMatch(have_match_for, websocket_mismatch)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/routing/rules.py b/venv/lib/python3.8/site-packages/werkzeug/routing/rules.py
new file mode 100644
index 0000000..2dad31d
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/routing/rules.py
@@ -0,0 +1,928 @@
+from __future__ import annotations
+
+import ast
+import re
+import typing as t
+from dataclasses import dataclass
+from string import Template
+from types import CodeType
+from urllib.parse import quote
+
+from ..datastructures import iter_multi_items
+from ..urls import _urlencode
+from .converters import ValidationError
+
+if t.TYPE_CHECKING:
+ from .converters import BaseConverter
+ from .map import Map
+
+
+class Weighting(t.NamedTuple):
+ number_static_weights: int
+ static_weights: list[tuple[int, int]]
+ number_argument_weights: int
+ argument_weights: list[int]
+
+
+@dataclass
+class RulePart:
+ """A part of a rule.
+
+ Rules can be represented by parts as delimited by `/` with
+ instances of this class representing those parts. The *content* is
+ either the raw content if *static* or a regex string to match
+ against. The *weight* can be used to order parts when matching.
+
+ """
+
+ content: str
+ final: bool
+ static: bool
+ suffixed: bool
+ weight: Weighting
+
+
+_part_re = re.compile(
+ r"""
+ (?:
+ (?P<slash>/) # a slash
+ |
+ (?P<static>[^</]+) # static rule data
+ |
+ (?:
+ <
+ (?:
+ (?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
+ (?:\((?P<arguments>.*?)\))? # converter arguments
+ : # variable delimiter
+ )?
+ (?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
+ >
+ )
+ )
+ """,
+ re.VERBOSE,
+)
+
+_simple_rule_re = re.compile(r"<([^>]+)>")
+_converter_args_re = re.compile(
+ r"""
+ \s*
+ ((?P<name>\w+)\s*=\s*)?
+ (?P<value>
+ True|False|
+ \d+.\d+|
+ \d+.|
+ \d+|
+ [\w\d_.]+|
+ [urUR]?(?P<stringval>"[^"]*?"|'[^']*')
+ )\s*,
+ """,
+ re.VERBOSE,
+)
+
+
+_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False}
+
+
+def _find(value: str, target: str, pos: int) -> int:
+ """Find the *target* in *value* after *pos*.
+
+ Returns the *value* length if *target* isn't found.
+ """
+ try:
+ return value.index(target, pos)
+ except ValueError:
+ return len(value)
+
+
+def _pythonize(value: str) -> None | bool | int | float | str:
+ if value in _PYTHON_CONSTANTS:
+ return _PYTHON_CONSTANTS[value]
+ for convert in int, float:
+ try:
+ return convert(value)
+ except ValueError:
+ pass
+ if value[:1] == value[-1:] and value[0] in "\"'":
+ value = value[1:-1]
+ return str(value)
+
+
+def parse_converter_args(argstr: str) -> tuple[tuple[t.Any, ...], dict[str, t.Any]]:
+ argstr += ","
+ args = []
+ kwargs = {}
+ position = 0
+
+ for item in _converter_args_re.finditer(argstr):
+ if item.start() != position:
+ raise ValueError(
+ f"Cannot parse converter argument '{argstr[position:item.start()]}'"
+ )
+
+ value = item.group("stringval")
+ if value is None:
+ value = item.group("value")
+ value = _pythonize(value)
+ if not item.group("name"):
+ args.append(value)
+ else:
+ name = item.group("name")
+ kwargs[name] = value
+ position = item.end()
+
+ return tuple(args), kwargs
+
+
+class RuleFactory:
+ """As soon as you have more complex URL setups it's a good idea to use rule
+ factories to avoid repetitive tasks. Some of them are builtin, others can
+ be added by subclassing `RuleFactory` and overriding `get_rules`.
+ """
+
+ def get_rules(self, map: Map) -> t.Iterable[Rule]:
+ """Subclasses of `RuleFactory` have to override this method and return
+ an iterable of rules."""
+ raise NotImplementedError()
+
+
+class Subdomain(RuleFactory):
+ """All URLs provided by this factory have the subdomain set to a
+ specific domain. For example if you want to use the subdomain for
+ the current language this can be a good setup::
+
+ url_map = Map([
+ Rule('/', endpoint='#select_language'),
+ Subdomain('<string(length=2):lang_code>', [
+ Rule('/', endpoint='index'),
+ Rule('/about', endpoint='about'),
+ Rule('/help', endpoint='help')
+ ])
+ ])
+
+ All the rules except for the ``'#select_language'`` endpoint will now
+ listen on a two letter long subdomain that holds the language code
+ for the current request.
+ """
+
+ def __init__(self, subdomain: str, rules: t.Iterable[RuleFactory]) -> None:
+ self.subdomain = subdomain
+ self.rules = rules
+
+ def get_rules(self, map: Map) -> t.Iterator[Rule]:
+ for rulefactory in self.rules:
+ for rule in rulefactory.get_rules(map):
+ rule = rule.empty()
+ rule.subdomain = self.subdomain
+ yield rule
+
+
+class Submount(RuleFactory):
+ """Like `Subdomain` but prefixes the URL rule with a given string::
+
+ url_map = Map([
+ Rule('/', endpoint='index'),
+ Submount('/blog', [
+ Rule('/', endpoint='blog/index'),
+ Rule('/entry/<entry_slug>', endpoint='blog/show')
+ ])
+ ])
+
+ Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
+ """
+
+ def __init__(self, path: str, rules: t.Iterable[RuleFactory]) -> None:
+ self.path = path.rstrip("/")
+ self.rules = rules
+
+ def get_rules(self, map: Map) -> t.Iterator[Rule]:
+ for rulefactory in self.rules:
+ for rule in rulefactory.get_rules(map):
+ rule = rule.empty()
+ rule.rule = self.path + rule.rule
+ yield rule
+
+
+class EndpointPrefix(RuleFactory):
+ """Prefixes all endpoints (which must be strings for this factory) with
+ another string. This can be useful for sub applications::
+
+ url_map = Map([
+ Rule('/', endpoint='index'),
+ EndpointPrefix('blog/', [Submount('/blog', [
+ Rule('/', endpoint='index'),
+ Rule('/entry/<entry_slug>', endpoint='show')
+ ])])
+ ])
+ """
+
+ def __init__(self, prefix: str, rules: t.Iterable[RuleFactory]) -> None:
+ self.prefix = prefix
+ self.rules = rules
+
+ def get_rules(self, map: Map) -> t.Iterator[Rule]:
+ for rulefactory in self.rules:
+ for rule in rulefactory.get_rules(map):
+ rule = rule.empty()
+ rule.endpoint = self.prefix + rule.endpoint
+ yield rule
+
+
+class RuleTemplate:
+ """Returns copies of the rules wrapped and expands string templates in
+ the endpoint, rule, defaults or subdomain sections.
+
+ Here a small example for such a rule template::
+
+ from werkzeug.routing import Map, Rule, RuleTemplate
+
+ resource = RuleTemplate([
+ Rule('/$name/', endpoint='$name.list'),
+ Rule('/$name/<int:id>', endpoint='$name.show')
+ ])
+
+ url_map = Map([resource(name='user'), resource(name='page')])
+
+ When a rule template is called the keyword arguments are used to
+ replace the placeholders in all the string parameters.
+ """
+
+ def __init__(self, rules: t.Iterable[Rule]) -> None:
+ self.rules = list(rules)
+
+ def __call__(self, *args: t.Any, **kwargs: t.Any) -> RuleTemplateFactory:
+ return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
+
+
+class RuleTemplateFactory(RuleFactory):
+ """A factory that fills in template variables into rules. Used by
+ `RuleTemplate` internally.
+
+ :internal:
+ """
+
+ def __init__(
+ self, rules: t.Iterable[RuleFactory], context: dict[str, t.Any]
+ ) -> None:
+ self.rules = rules
+ self.context = context
+
+ def get_rules(self, map: Map) -> t.Iterator[Rule]:
+ for rulefactory in self.rules:
+ for rule in rulefactory.get_rules(map):
+ new_defaults = subdomain = None
+ if rule.defaults:
+ new_defaults = {}
+ for key, value in rule.defaults.items():
+ if isinstance(value, str):
+ value = Template(value).substitute(self.context)
+ new_defaults[key] = value
+ if rule.subdomain is not None:
+ subdomain = Template(rule.subdomain).substitute(self.context)
+ new_endpoint = rule.endpoint
+ if isinstance(new_endpoint, str):
+ new_endpoint = Template(new_endpoint).substitute(self.context)
+ yield Rule(
+ Template(rule.rule).substitute(self.context),
+ new_defaults,
+ subdomain,
+ rule.methods,
+ rule.build_only,
+ new_endpoint,
+ rule.strict_slashes,
+ )
+
+
+_ASTT = t.TypeVar("_ASTT", bound=ast.AST)
+
+
+def _prefix_names(src: str, expected_type: type[_ASTT]) -> _ASTT:
+ """ast parse and prefix names with `.` to avoid collision with user vars"""
+ tree: ast.AST = ast.parse(src).body[0]
+ if isinstance(tree, ast.Expr):
+ tree = tree.value
+ if not isinstance(tree, expected_type):
+ raise TypeError(
+ f"AST node is of type {type(tree).__name__}, not {expected_type.__name__}"
+ )
+ for node in ast.walk(tree):
+ if isinstance(node, ast.Name):
+ node.id = f".{node.id}"
+ return tree
+
+
+_CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()"
+_IF_KWARGS_URL_ENCODE_CODE = """\
+if kwargs:
+ params = self._encode_query_vars(kwargs)
+ q = "?" if params else ""
+else:
+ q = params = ""
+"""
+_IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE, ast.If)
+_URL_ENCODE_AST_NAMES = (
+ _prefix_names("q", ast.Name),
+ _prefix_names("params", ast.Name),
+)
+
+
+class Rule(RuleFactory):
+ """A Rule represents one URL pattern. There are some options for `Rule`
+ that change the way it behaves and are passed to the `Rule` constructor.
+ Note that besides the rule-string all arguments *must* be keyword arguments
+ in order to not break the application on Werkzeug upgrades.
+
+ `string`
+ Rule strings basically are just normal URL paths with placeholders in
+ the format ``<converter(arguments):name>`` where the converter and the
+ arguments are optional. If no converter is defined the `default`
+ converter is used which means `string` in the normal configuration.
+
+ URL rules that end with a slash are branch URLs, others are leaves.
+ If you have `strict_slashes` enabled (which is the default), all
+ branch URLs that are matched without a trailing slash will trigger a
+ redirect to the same URL with the missing slash appended.
+
+ The converters are defined on the `Map`.
+
+ `endpoint`
+ The endpoint for this rule. This can be anything. A reference to a
+ function, a string, a number etc. The preferred way is using a string
+ because the endpoint is used for URL generation.
+
+ `defaults`
+ An optional dict with defaults for other rules with the same endpoint.
+ This is a bit tricky but useful if you want to have unique URLs::
+
+ url_map = Map([
+ Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
+ Rule('/all/page/<int:page>', endpoint='all_entries')
+ ])
+
+ If a user now visits ``http://example.com/all/page/1`` they will be
+ redirected to ``http://example.com/all/``. If `redirect_defaults` is
+ disabled on the `Map` instance this will only affect the URL
+ generation.
+
+ `subdomain`
+ The subdomain rule string for this rule. If not specified the rule
+ only matches for the `default_subdomain` of the map. If the map is
+ not bound to a subdomain this feature is disabled.
+
+ Can be useful if you want to have user profiles on different subdomains
+ and all subdomains are forwarded to your application::
+
+ url_map = Map([
+ Rule('/', subdomain='<username>', endpoint='user/homepage'),
+ Rule('/stats', subdomain='<username>', endpoint='user/stats')
+ ])
+
+ `methods`
+ A sequence of http methods this rule applies to. If not specified, all
+ methods are allowed. For example this can be useful if you want different
+ endpoints for `POST` and `GET`. If methods are defined and the path
+ matches but the method matched against is not in this list or in the
+ list of another rule for that path the error raised is of the type
+ `MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
+ list of methods and `HEAD` is not, `HEAD` is added automatically.
+
+ `strict_slashes`
+ Override the `Map` setting for `strict_slashes` only for this rule. If
+ not specified the `Map` setting is used.
+
+ `merge_slashes`
+ Override :attr:`Map.merge_slashes` for this rule.
+
+ `build_only`
+ Set this to True and the rule will never match but will create a URL
+ that can be built. This is useful if you have resources on a subdomain
+ or folder that are not handled by the WSGI application (like static data)
+
+ `redirect_to`
+ If given this must be either a string or callable. In case of a
+ callable it's called with the url adapter that triggered the match and
+ the values of the URL as keyword arguments and has to return the target
+ for the redirect, otherwise it has to be a string with placeholders in
+ rule syntax::
+
+ def foo_with_slug(adapter, id):
+ # ask the database for the slug for the old id. this of
+ # course has nothing to do with werkzeug.
+ return f'foo/{Foo.get_slug_for_id(id)}'
+
+ url_map = Map([
+ Rule('/foo/<slug>', endpoint='foo'),
+ Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
+ Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
+ ])
+
+ When the rule is matched the routing system will raise a
+ `RequestRedirect` exception with the target for the redirect.
+
+ Keep in mind that the URL will be joined against the URL root of the
+ script so don't use a leading slash on the target URL unless you
+ really mean root of that domain.
+
+ `alias`
+ If enabled this rule serves as an alias for another rule with the same
+ endpoint and arguments.
+
+ `host`
+ If provided and the URL map has host matching enabled this can be
+ used to provide a match rule for the whole host. This also means
+ that the subdomain feature is disabled.
+
+ `websocket`
+ If ``True``, this rule only matches for WebSocket (``ws://``,
+ ``wss://``) requests. By default, rules will only match for HTTP
+ requests.
+
+ .. versionchanged:: 2.1
+ Percent-encoded newlines (``%0a``), which are decoded by WSGI
+ servers, are considered when routing instead of terminating the
+ match early.
+
+ .. versionadded:: 1.0
+ Added ``websocket``.
+
+ .. versionadded:: 1.0
+ Added ``merge_slashes``.
+
+ .. versionadded:: 0.7
+ Added ``alias`` and ``host``.
+
+ .. versionchanged:: 0.6.1
+ ``HEAD`` is added to ``methods`` if ``GET`` is present.
+ """
+
def __init__(
    self,
    string: str,
    defaults: t.Mapping[str, t.Any] | None = None,
    subdomain: str | None = None,
    methods: t.Iterable[str] | None = None,
    build_only: bool = False,
    endpoint: t.Any | None = None,
    strict_slashes: bool | None = None,
    merge_slashes: bool | None = None,
    redirect_to: str | t.Callable[..., str] | None = None,
    alias: bool = False,
    host: str | None = None,
    websocket: bool = False,
) -> None:
    """Create an unbound rule for ``string``.

    The rule is not usable until it is attached to a :class:`Map` via
    :meth:`bind`, which also resolves the ``None`` values of
    ``strict_slashes``, ``merge_slashes`` and ``subdomain`` from the map.

    :raises ValueError: if ``string`` does not start with a slash, or if
        ``websocket`` is combined with methods other than ``GET``,
        ``HEAD`` and ``OPTIONS``.
    :raises TypeError: if ``methods`` is a single string.
    """
    # Rules are always absolute paths within their map.
    if not string.startswith("/"):
        raise ValueError(f"URL rule '{string}' must start with a slash.")

    self.rule = string
    # A trailing slash marks a "branch" URL; anything else is a "leaf".
    self.is_leaf = not string.endswith("/")
    self.is_branch = string.endswith("/")

    # Set by bind(); None until the rule is attached to a Map.
    self.map: Map = None  # type: ignore
    self.strict_slashes = strict_slashes
    self.merge_slashes = merge_slashes
    self.subdomain = subdomain
    self.host = host
    self.defaults = defaults
    self.build_only = build_only
    self.alias = alias
    self.websocket = websocket

    if methods is not None:
        # A bare string would silently iterate per character; reject it.
        if isinstance(methods, str):
            raise TypeError("'methods' should be a list of strings.")

        # Normalize to an upper-case set for case-insensitive matching.
        methods = {x.upper() for x in methods}

        # HEAD is implied whenever GET is allowed.
        if "HEAD" not in methods and "GET" in methods:
            methods.add("HEAD")

        if websocket and methods - {"GET", "HEAD", "OPTIONS"}:
            raise ValueError(
                "WebSocket rules can only use 'GET', 'HEAD', and 'OPTIONS' methods."
            )

    self.methods = methods
    self.endpoint: t.Any = endpoint
    self.redirect_to = redirect_to

    # Argument names known up front from the defaults; names for the
    # rule's converters are added later by _parse_rule().
    if defaults:
        self.arguments = set(map(str, defaults))
    else:
        self.arguments = set()

    # Populated by compile(): converter instances, the build trace, and
    # the matcher parts for this rule.
    self._converters: dict[str, BaseConverter] = {}
    self._trace: list[tuple[bool, str]] = []
    self._parts: list[RulePart] = []
+
def empty(self) -> Rule:
    """Return an unbound copy of this rule.

    Useful when you want to reuse an already bound rule in a different
    map.  Override :meth:`get_empty_kwargs` to customize which keyword
    arguments the copy receives.
    """
    cls = type(self)
    return cls(self.rule, **self.get_empty_kwargs())
+
def get_empty_kwargs(self) -> t.Mapping[str, t.Any]:
    """Provide the keyword arguments used by :meth:`empty`.

    Override this in a ``Rule`` subclass to supply custom keyword
    arguments when ``some_rule.empty()`` is called.  Must return a
    ``dict`` passed as kwargs to the new instance, after the initial
    ``self.rule`` value which is always the first positional argument.
    """
    defaults = None
    if self.defaults:
        # Copy so the new rule does not share (and mutate) this dict.
        defaults = dict(self.defaults)
    return dict(
        defaults=defaults,
        subdomain=self.subdomain,
        methods=self.methods,
        build_only=self.build_only,
        endpoint=self.endpoint,
        strict_slashes=self.strict_slashes,
        # Previously omitted: without these an ``empty()`` copy silently
        # lost its WebSocket flag and any merge_slashes override.
        merge_slashes=self.merge_slashes,
        websocket=self.websocket,
        redirect_to=self.redirect_to,
        alias=self.alias,
        host=self.host,
    )
+
def get_rules(self, map: Map) -> t.Iterator[Rule]:
    """Yield this rule itself; a plain rule expands to exactly one rule."""
    yield from (self,)
+
def refresh(self) -> None:
    """Rebinds and refreshes the URL. Call this if you modified the
    rule in place.

    :internal:
    """
    # Re-run bind() on the already attached map; rebind=True bypasses
    # the "already bound" guard.
    self.bind(self.map, rebind=True)
+
def bind(self, map: Map, rebind: bool = False) -> None:
    """Bind the url to a map and create a regular expression based on
    the information from the rule itself and the defaults from the map.

    :param map: the :class:`Map` this rule is attached to.
    :param rebind: allow binding even if the rule is already bound.
    :raises RuntimeError: if the rule is already bound and ``rebind``
        is not set.

    :internal:
    """
    if self.map is not None and not rebind:
        raise RuntimeError(f"url rule {self!r} already bound to map {self.map!r}")
    self.map = map
    # Options the rule left as None fall back to the map-wide settings.
    if self.strict_slashes is None:
        self.strict_slashes = map.strict_slashes
    if self.merge_slashes is None:
        self.merge_slashes = map.merge_slashes
    if self.subdomain is None:
        self.subdomain = map.default_subdomain
    self.compile()
+
def get_converter(
    self,
    variable_name: str,
    converter_name: str,
    args: tuple[t.Any, ...],
    kwargs: t.Mapping[str, t.Any],
) -> BaseConverter:
    """Instantiate the named converter registered on the bound map.

    :raises LookupError: if no converter is registered under
        ``converter_name``.

    .. versionadded:: 0.9
    """
    converters = self.map.converters
    if converter_name in converters:
        return converters[converter_name](self.map, *args, **kwargs)
    raise LookupError(f"the converter {converter_name!r} does not exist")
+
def _encode_query_vars(self, query_vars: t.Mapping[str, t.Any]) -> str:
    """Encode leftover build values as a URL query string.

    Multi-value mappings are flattened to repeated keys; the pairs are
    sorted with :attr:`Map.sort_key` when :attr:`Map.sort_parameters`
    is enabled.
    """
    items: t.Iterable[tuple[str, str]] = iter_multi_items(query_vars)

    if self.map.sort_parameters:
        items = sorted(items, key=self.map.sort_key)

    return _urlencode(items)
+
def _parse_rule(self, rule: str) -> t.Iterable[RulePart]:
    """Parse a rule string into a sequence of :class:`RulePart`.

    A part is emitted at every ``/`` while the content so far is
    part-isolating; once a converter that can match a slash is seen,
    ``final`` is set and the rest of the rule collapses into a single
    part.  Side effects: appends to ``self._trace`` and fills in
    ``self._converters`` and ``self.arguments``.

    :internal:
    """
    content = ""
    # While True, `content` is a literal string; after the first
    # converter it becomes a regex and literals must be escaped.
    static = True
    argument_weights = []
    static_weights: list[tuple[int, int]] = []
    final = False
    convertor_number = 0

    pos = 0
    while pos < len(rule):
        match = _part_re.match(rule, pos)
        if match is None:
            raise ValueError(f"malformed url rule: {rule!r}")

        data = match.groupdict()
        if data["static"] is not None:
            # Longer static segments weigh more (negative length sorts first).
            static_weights.append((len(static_weights), -len(data["static"])))
            self._trace.append((False, data["static"]))
            content += data["static"] if static else re.escape(data["static"])

        if data["variable"] is not None:
            if static:
                # Switching content to represent regex, hence the need to escape
                content = re.escape(content)
                static = False
            c_args, c_kwargs = parse_converter_args(data["arguments"] or "")
            convobj = self.get_converter(
                data["variable"], data["converter"] or "default", c_args, c_kwargs
            )
            self._converters[data["variable"]] = convobj
            self.arguments.add(data["variable"])
            # A converter that may match "/" forces the rest of the rule
            # into one final part.
            if not convobj.part_isolating:
                final = True
            content += f"(?P<__werkzeug_{convertor_number}>{convobj.regex})"
            convertor_number += 1
            argument_weights.append(convobj.weight)
            self._trace.append((True, data["variable"]))

        if data["slash"] is not None:
            self._trace.append((False, "/"))
            if final:
                content += "/"
            else:
                # Emit the accumulated part and reset all accumulators.
                if not static:
                    content += r"\Z"
                weight = Weighting(
                    -len(static_weights),
                    static_weights,
                    -len(argument_weights),
                    argument_weights,
                )
                yield RulePart(
                    content=content,
                    final=final,
                    static=static,
                    suffixed=False,
                    weight=weight,
                )
                content = ""
                static = True
                argument_weights = []
                static_weights = []
                final = False
                convertor_number = 0

        pos = match.end()

    suffixed = False
    if final and content[-1] == "/":
        # If a converter is part_isolating=False (matches slashes) and ends with a
        # slash, augment the regex to support slash redirects.
        suffixed = True
        content = content[:-1] + "(?<!/)(/?)"
    if not static:
        content += r"\Z"
    weight = Weighting(
        -len(static_weights),
        static_weights,
        -len(argument_weights),
        argument_weights,
    )
    # The trailing part of the rule.
    yield RulePart(
        content=content,
        final=final,
        static=static,
        suffixed=suffixed,
        weight=weight,
    )
    if suffixed:
        # Extra empty part so the optional trailing slash can be matched.
        yield RulePart(
            content="", final=False, static=True, suffixed=False, weight=weight
        )
+
def compile(self) -> None:
    """Compiles the regular expression and stores it.

    Parses the domain part (host or subdomain) and the path into
    ``self._parts`` and pre-compiles the two builder callables.
    """
    assert self.map is not None, "rule not bound"

    # Domain part: the full host or just the subdomain, depending on
    # how the map matches.
    if self.map.host_matching:
        domain_rule = self.host or ""
    else:
        domain_rule = self.subdomain or ""
    self._parts = []
    self._trace = []
    self._converters = {}
    if domain_rule == "":
        # No domain restriction: a single empty static part.
        self._parts = [
            RulePart(
                content="",
                final=False,
                static=True,
                suffixed=False,
                weight=Weighting(0, [], 0, []),
            )
        ]
    else:
        self._parts.extend(self._parse_rule(domain_rule))
    # "|" separates the domain trace from the path trace.
    self._trace.append((False, "|"))
    rule = self.rule
    if self.merge_slashes:
        # Collapse any run of slashes into a single one before parsing.
        rule = re.sub("/{2,}?", "/", self.rule)
    self._parts.extend(self._parse_rule(rule))

    # Builder with/without appending unknown values as a query string,
    # bound to this rule instance.
    self._build: t.Callable[..., tuple[str, str]]
    self._build = self._compile_builder(False).__get__(self, None)
    self._build_unknown: t.Callable[..., tuple[str, str]]
    self._build_unknown = self._compile_builder(True).__get__(self, None)
+
@staticmethod
def _get_func_code(code: CodeType, name: str) -> t.Callable[..., tuple[str, str]]:
    """Execute a compiled builder module and return the function named
    ``name`` from its local namespace.

    :internal:
    """
    globs: dict[str, t.Any] = {}
    locs: dict[str, t.Any] = {}
    exec(code, globs, locs)
    return locs[name]  # type: ignore
+
def _compile_builder(
    self, append_unknown: bool = True
) -> t.Callable[..., tuple[str, str]]:
    """Generate a specialized build function for this rule.

    The rule's trace is compiled — via the ``ast`` module — into a
    Python function returning ``(domain, url)`` for the given argument
    values, avoiding per-call string processing.

    :param append_unknown: also encode values not used by the rule as
        a query string.

    :internal:
    """
    defaults = self.defaults or {}
    dom_ops: list[tuple[bool, str]] = []
    url_ops: list[tuple[bool, str]] = []

    # The trace holds domain segments, then "|", then path segments.
    opl = dom_ops
    for is_dynamic, data in self._trace:
        if data == "|" and opl is dom_ops:
            opl = url_ops
            continue
        # this seems like a silly case to ever come up but:
        # if a default is given for a value that appears in the rule,
        # resolve it to a constant ahead of time
        if is_dynamic and data in defaults:
            data = self._converters[data].to_url(defaults[data])
            opl.append((False, data))
        elif not is_dynamic:
            # safe = https://url.spec.whatwg.org/#url-path-segment-string
            opl.append((False, quote(data, safe="!$&'()*+,/:;=@")))
        else:
            opl.append((True, data))

    def _convert(elem: str) -> ast.Call:
        # AST for the converter call applied to the argument `elem`.
        ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem), ast.Call)
        ret.args = [ast.Name(elem, ast.Load())]
        return ret

    def _parts(ops: list[tuple[bool, str]]) -> list[ast.expr]:
        # Turn ops into AST expressions, merging adjacent constants.
        parts: list[ast.expr] = [
            _convert(elem) if is_dynamic else ast.Constant(elem)
            for is_dynamic, elem in ops
        ]
        parts = parts or [ast.Constant("")]
        # constant fold
        ret = [parts[0]]
        for p in parts[1:]:
            if isinstance(p, ast.Constant) and isinstance(ret[-1], ast.Constant):
                ret[-1] = ast.Constant(ret[-1].value + p.value)
            else:
                ret.append(p)
        return ret

    dom_parts = _parts(dom_ops)
    url_parts = _parts(url_ops)
    body: list[ast.stmt]
    if not append_unknown:
        body = []
    else:
        # Prepend the "encode remaining kwargs as query string" snippet.
        body = [_IF_KWARGS_URL_ENCODE_AST]
        url_parts.extend(_URL_ENCODE_AST_NAMES)

    def _join(parts: list[ast.expr]) -> ast.expr:
        if len(parts) == 1:  # shortcut
            return parts[0]
        return ast.JoinedStr(parts)

    body.append(
        ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load()))
    )

    # Dynamic arguments without defaults become positional parameters;
    # defaulted ones become keyword parameters with "" placeholders.
    pargs = [
        elem
        for is_dynamic, elem in dom_ops + url_ops
        if is_dynamic and elem not in defaults
    ]
    kargs = [str(k) for k in defaults]

    func_ast = _prefix_names("def _(): pass", ast.FunctionDef)
    func_ast.name = f"<builder:{self.rule!r}>"
    func_ast.args.args.append(ast.arg(".self", None))
    for arg in pargs + kargs:
        func_ast.args.args.append(ast.arg(arg, None))
    func_ast.args.kwarg = ast.arg(".kwargs", None)
    for _ in kargs:
        func_ast.args.defaults.append(ast.Constant(""))
    func_ast.body = body

    # Use `ast.parse` instead of `ast.Module` for better portability, since the
    # signature of `ast.Module` can change.
    module = ast.parse("")
    module.body = [func_ast]

    # mark everything as on line 1, offset 0
    # less error-prone than `ast.fix_missing_locations`
    # bad line numbers cause an assert to fail in debug builds
    for node in ast.walk(module):
        if "lineno" in node._attributes:
            node.lineno = 1  # type: ignore[attr-defined]
        if "end_lineno" in node._attributes:
            node.end_lineno = node.lineno  # type: ignore[attr-defined]
        if "col_offset" in node._attributes:
            node.col_offset = 0  # type: ignore[attr-defined]
        if "end_col_offset" in node._attributes:
            node.end_col_offset = node.col_offset  # type: ignore[attr-defined]

    code = compile(module, "<werkzeug routing>", "exec")
    return self._get_func_code(code, func_ast.name)
+
def build(
    self, values: t.Mapping[str, t.Any], append_unknown: bool = True
) -> tuple[str, str] | None:
    """Assemble ``(subdomain, path)`` for this rule from ``values``.

    Returns ``None`` if building fails (a converter rejected a value).

    :internal:
    """
    builder = self._build_unknown if append_unknown else self._build
    try:
        return builder(**values)
    except ValidationError:
        return None
+
def provides_defaults_for(self, rule: Rule) -> bool:
    """Check if this rule has defaults for a given rule.

    :internal:
    """
    # Build-only rules and rules without defaults never provide any.
    if self.build_only or not self.defaults:
        return False
    return bool(
        self.endpoint == rule.endpoint
        and self != rule
        and self.arguments == rule.arguments
    )
+
def suitable_for(
    self, values: t.Mapping[str, t.Any], method: str | None = None
) -> bool:
    """Check if the dict of values has enough data for url generation.

    :internal:
    """
    # An explicitly requested method must be one this rule accepts.
    if method is not None and self.methods is not None:
        if method not in self.methods:
            return False

    defaults = self.defaults or ()

    # Every rule argument must come from either the defaults or the
    # supplied values.
    if any(key not in defaults and key not in values for key in self.arguments):
        return False

    # A supplied value may not contradict a default for the same key.
    if defaults:
        for key, value in defaults.items():
            if key in values and value != values[key]:
                return False

    return True
+
def build_compare_key(self) -> tuple[int, int, int]:
    """The build compare key for sorting.

    Aliases sort after canonical rules; rules with more arguments and
    more defaults sort first (hence the negated lengths).

    :internal:
    """
    alias_rank = 1 if self.alias else 0
    return (alias_rank, -len(self.arguments), -len(self.defaults or ()))
+
def __eq__(self, other: object) -> bool:
    # Two rules are equal when they produce the same trace, i.e. the
    # same sequence of static and dynamic segments.
    return isinstance(other, type(self)) and self._trace == other._trace

# Rules compare by mutable trace, so they are explicitly unhashable.
__hash__ = None  # type: ignore
+
def __str__(self) -> str:
    """Return the original rule string."""
    return self.rule
+
def __repr__(self) -> str:
    """Render the combined domain/path trace plus, when set, the
    methods and the endpoint; unbound rules have no trace to show.
    """
    if self.map is None:
        return f"<{type(self).__name__} (unbound)>"
    trace = "".join(
        f"<{data}>" if is_dynamic else data for is_dynamic, data in self._trace
    ).lstrip("|")
    if self.methods is not None:
        methods = f" ({', '.join(self.methods)})"
    else:
        methods = ""
    return f"<{type(self).__name__} {trace!r}{methods} -> {self.endpoint}>"
diff --git a/venv/lib/python3.8/site-packages/werkzeug/sansio/__init__.py b/venv/lib/python3.8/site-packages/werkzeug/sansio/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/sansio/__init__.py
diff --git a/venv/lib/python3.8/site-packages/werkzeug/sansio/http.py b/venv/lib/python3.8/site-packages/werkzeug/sansio/http.py
new file mode 100644
index 0000000..b2b8877
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/sansio/http.py
@@ -0,0 +1,171 @@
+from __future__ import annotations
+
+import re
+import typing as t
+from datetime import datetime
+
+from .._internal import _dt_as_utc
+from ..http import generate_etag
+from ..http import parse_date
+from ..http import parse_etags
+from ..http import parse_if_range_header
+from ..http import unquote_etag
+
# One entity-tag in a header: an optional weak prefix ("W/" or "w/"),
# then a quoted or unquoted tag, followed by a comma separator or the end.
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
+
+
def is_resource_modified(
    http_range: str | None = None,
    http_if_range: str | None = None,
    http_if_modified_since: str | None = None,
    http_if_none_match: str | None = None,
    http_if_match: str | None = None,
    etag: str | None = None,
    data: bytes | None = None,
    last_modified: datetime | str | None = None,
    ignore_if_range: bool = True,
) -> bool:
    """Convenience method for conditional requests.

    :param http_range: Range HTTP header
    :param http_if_range: If-Range HTTP header
    :param http_if_modified_since: If-Modified-Since HTTP header
    :param http_if_none_match: If-None-Match HTTP header
    :param http_if_match: If-Match HTTP header
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
        generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :param ignore_if_range: If `False`, `If-Range` header will be taken into
        account.
    :return: `True` if the resource was modified, otherwise `False`.
    :raises TypeError: if both ``data`` and ``etag`` are given.

    .. versionadded:: 2.2
    """
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError("both data and etag given")

    unmodified = False
    if isinstance(last_modified, str):
        last_modified = parse_date(last_modified)

    # HTTP doesn't use microsecond, remove it to avoid false positive
    # comparisons. Mark naive datetimes as UTC.
    if last_modified is not None:
        last_modified = _dt_as_utc(last_modified.replace(microsecond=0))

    if_range = None
    if not ignore_if_range and http_range is not None:
        # https://tools.ietf.org/html/rfc7233#section-3.2
        # A server MUST ignore an If-Range header field received in a request
        # that does not contain a Range header field.
        if_range = parse_if_range_header(http_if_range)

    # An If-Range date takes precedence over If-Modified-Since.
    if if_range is not None and if_range.date is not None:
        modified_since: datetime | None = if_range.date
    else:
        modified_since = parse_date(http_if_modified_since)

    if modified_since and last_modified and last_modified <= modified_since:
        unmodified = True

    if etag:
        etag, _ = unquote_etag(etag)
        etag = t.cast(str, etag)

        if if_range is not None and if_range.etag is not None:
            unmodified = parse_etags(if_range.etag).contains(etag)
        else:
            if_none_match = parse_etags(http_if_none_match)
            if if_none_match:
                # https://tools.ietf.org/html/rfc7232#section-3.2
                # "A recipient MUST use the weak comparison function when comparing
                # entity-tags for If-None-Match"
                unmodified = if_none_match.contains_weak(etag)

            # https://tools.ietf.org/html/rfc7232#section-3.1
            # "Origin server MUST use the strong comparison function when
            # comparing entity-tags for If-Match"
            if_match = parse_etags(http_if_match)
            if if_match:
                unmodified = not if_match.is_strong(etag)

    return not unmodified
+
+
# Splits a Cookie header into name/value pairs.  The value may be a
# quoted string containing backslash escapes; pairs end at a ";".
_cookie_re = re.compile(
    r"""
    ([^=;]*)
    (?:\s*=\s*
        (
            "(?:[^\\"]|\\.)*"
        |
            .*?
        )
    )?
    \s*;\s*
    """,
    flags=re.ASCII | re.VERBOSE,
)
# One backslash escape inside a quoted cookie value: a three-digit octal
# escape or a single escaped character.
_cookie_unslash_re = re.compile(rb"\\([0-3][0-7]{2}|.)")
+
+
def _cookie_unslash_replace(m: t.Match[bytes]) -> bytes:
    """Resolve one backslash escape from a quoted cookie value.

    A single escaped character maps to itself; a three-digit octal
    escape maps to the corresponding byte.
    """
    escaped = m.group(1)
    return escaped if len(escaped) == 1 else int(escaped, 8).to_bytes(1, "big")
+
+
def parse_cookie(
    cookie: str | None = None,
    cls: type[ds.MultiDict[str, str]] | None = None,
) -> ds.MultiDict[str, str]:
    """Parse a ``Cookie`` header value into a dict-like structure.

    A key may appear multiple times; values are kept in order, so the
    default :class:`MultiDict` returns the first value while all values
    remain available via :meth:`MultiDict.getlist`.

    :param cookie: The cookie header as a string.
    :param cls: A dict-like class to store the parsed cookies in.
        Defaults to :class:`MultiDict`.

    .. versionchanged:: 3.0
        Passing bytes, and the ``charset`` and ``errors`` parameters, were removed.

    .. versionadded:: 2.2
    """
    if cls is None:
        cls = t.cast("type[ds.MultiDict[str, str]]", ds.MultiDict)

    if not cookie:
        return cls()

    items = []

    # The regex consumes up to a ";" terminator, so append one to make
    # the final pair match as well.
    for key, value in _cookie_re.findall(f"{cookie};"):
        key = key.strip()
        value = value.strip()

        if not key:
            continue

        if len(value) >= 2 and value[0] == value[-1] == '"':
            # Strip the quotes and resolve backslash escapes.  Work with
            # bytes here, since a UTF-8 character could be multiple bytes.
            value = _cookie_unslash_re.sub(
                _cookie_unslash_replace, value[1:-1].encode()
            ).decode(errors="replace")

        items.append((key, value))

    return cls(items)
+
+
+# circular dependencies
+from .. import datastructures as ds
diff --git a/venv/lib/python3.8/site-packages/werkzeug/sansio/multipart.py b/venv/lib/python3.8/site-packages/werkzeug/sansio/multipart.py
new file mode 100644
index 0000000..731be03
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/sansio/multipart.py
@@ -0,0 +1,323 @@
+from __future__ import annotations
+
+import re
+import typing as t
+from dataclasses import dataclass
+from enum import auto
+from enum import Enum
+
+from ..datastructures import Headers
+from ..exceptions import RequestEntityTooLarge
+from ..http import parse_options_header
+
+
class Event:
    """Base class for all multipart decoder/encoder events."""

    pass


@dataclass(frozen=True)
class Preamble(Event):
    """Bytes seen before the first boundary."""

    data: bytes


@dataclass(frozen=True)
class Field(Event):
    """Start of a form field (Content-Disposition without a filename)."""

    name: str
    headers: Headers


@dataclass(frozen=True)
class File(Event):
    """Start of an uploaded file part (Content-Disposition has a filename)."""

    name: str
    filename: str
    headers: Headers


@dataclass(frozen=True)
class Data(Event):
    """A chunk of a part's body; ``more_data`` signals more chunks follow."""

    data: bytes
    more_data: bool


@dataclass(frozen=True)
class Epilogue(Event):
    """Bytes after the closing boundary."""

    data: bytes


class NeedData(Event):
    """Sentinel event: more input is required before progress can be made."""

    pass


# Singleton instance returned by the decoder whenever it needs more bytes.
NEED_DATA = NeedData()
+
+
class State(Enum):
    """Parser states shared by :class:`MultipartDecoder` and
    :class:`MultipartEncoder`."""

    PREAMBLE = auto()  # before the first boundary
    PART = auto()  # reading a part's headers
    DATA = auto()  # streaming a part's body
    DATA_START = auto()  # body begins right after the header blank line
    EPILOGUE = auto()  # after the closing ("--") boundary
    COMPLETE = auto()  # epilogue emitted, nothing left to parse
+
+
# Multipart line breaks MUST be CRLF (\r\n) by RFC-7578, except that
# many implementations break this and either use CR or LF alone.
LINE_BREAK = b"(?:\r\n|\n|\r)"
# A blank line (two consecutive line breaks) terminates a part's headers.
BLANK_LINE_RE = re.compile(b"(?:\r\n\r\n|\r\r|\n\n)", re.MULTILINE)
LINE_BREAK_RE = re.compile(LINE_BREAK, re.MULTILINE)
# Header values can be continued via a space or tab after the linebreak, as
# per RFC2231
HEADER_CONTINUATION_RE = re.compile(b"%s[ \t]" % LINE_BREAK, re.MULTILINE)
# This must be long enough to contain any line breaks plus any
# additional boundary markers (--) such that they will be found in a
# subsequent search
SEARCH_EXTRA_LENGTH = 8
+
+
class MultipartDecoder:
    """Decodes a multipart message as bytes into Python events.

    The part data is returned as available to allow the caller to save
    the data from memory to disk, if desired.

    Feed bytes with :meth:`receive_data` and pull :class:`Event`
    instances with :meth:`next_event` until ``NEED_DATA`` is returned.
    """

    def __init__(
        self,
        boundary: bytes,
        max_form_memory_size: int | None = None,
        *,
        max_parts: int | None = None,
    ) -> None:
        """
        :param boundary: the multipart boundary, without the ``--`` prefix.
        :param max_form_memory_size: raise ``RequestEntityTooLarge`` if
            buffering would exceed this many bytes.
        :param max_parts: raise ``RequestEntityTooLarge`` when more than
            this many parts are decoded.
        """
        self.buffer = bytearray()
        self.complete = False
        self.max_form_memory_size = max_form_memory_size
        self.max_parts = max_parts
        self.state = State.PREAMBLE
        self.boundary = boundary

        # Note in the below \h i.e. horizontal whitespace is used
        # as [^\S\n\r] as \h isn't supported in python.

        # The preamble must end with a boundary where the boundary is
        # prefixed by a line break, RFC2046. Except that many
        # implementations including Werkzeug's tests omit the line
        # break prefix. In addition the first boundary could be the
        # epilogue boundary (for empty form-data) hence the matching
        # group to understand if it is an epilogue boundary.
        self.preamble_re = re.compile(
            rb"%s?--%s(--[^\S\n\r]*%s?|[^\S\n\r]*%s)"
            % (LINE_BREAK, re.escape(boundary), LINE_BREAK, LINE_BREAK),
            re.MULTILINE,
        )
        # A boundary must include a line break prefix and suffix, and
        # may include trailing whitespace. In addition the boundary
        # could be the epilogue boundary hence the matching group to
        # understand if it is an epilogue boundary.
        self.boundary_re = re.compile(
            rb"%s--%s(--[^\S\n\r]*%s?|[^\S\n\r]*%s)"
            % (LINE_BREAK, re.escape(boundary), LINE_BREAK, LINE_BREAK),
            re.MULTILINE,
        )
        self._search_position = 0
        self._parts_decoded = 0

    def last_newline(self, data: bytes) -> int:
        """Return the index of the earlier of the last ``\\n`` / ``\\r``
        in ``data`` (``len(data)`` if neither occurs).

        Used to decide how much data can safely be emitted while a
        partial boundary may still sit at the end of the buffer.
        """
        try:
            last_nl = data.rindex(b"\n")
        except ValueError:
            last_nl = len(data)
        try:
            last_cr = data.rindex(b"\r")
        except ValueError:
            last_cr = len(data)

        return min(last_nl, last_cr)

    def receive_data(self, data: bytes | None) -> None:
        """Feed more bytes to the decoder; ``None`` marks end of input.

        :raises RequestEntityTooLarge: if buffering ``data`` would
            exceed ``max_form_memory_size``.
        """
        if data is None:
            self.complete = True
        elif (
            self.max_form_memory_size is not None
            and len(self.buffer) + len(data) > self.max_form_memory_size
        ):
            # Ensure that data within single event does not exceed limit.
            # Also checked across accumulated events in MultiPartParser.
            raise RequestEntityTooLarge()
        else:
            self.buffer.extend(data)

    def next_event(self) -> Event:
        """Return the next parse event, or ``NEED_DATA`` when the buffer
        does not yet contain enough bytes to make progress.

        :raises RequestEntityTooLarge: if more than ``max_parts`` parts
            are decoded.
        :raises ValueError: for malformed form-data (missing
            Content-Disposition header, or input that ends mid-parse).
        """
        event: Event = NEED_DATA

        if self.state == State.PREAMBLE:
            match = self.preamble_re.search(self.buffer, self._search_position)
            if match is not None:
                # A "--" suffix means the first boundary already closed
                # the message (empty form-data).
                if match.group(1).startswith(b"--"):
                    self.state = State.EPILOGUE
                else:
                    self.state = State.PART
                data = bytes(self.buffer[: match.start()])
                del self.buffer[: match.end()]
                event = Preamble(data=data)
                self._search_position = 0
            else:
                # Update the search start position to be equal to the
                # current buffer length (already searched) minus a
                # safe buffer for part of the search target.
                self._search_position = max(
                    0, len(self.buffer) - len(self.boundary) - SEARCH_EXTRA_LENGTH
                )

        elif self.state == State.PART:
            match = BLANK_LINE_RE.search(self.buffer, self._search_position)
            if match is not None:
                headers = self._parse_headers(self.buffer[: match.start()])
                # The final header ends with a single CRLF, however a
                # blank line indicates the start of the
                # body. Therefore the end is after the first CRLF.
                headers_end = (match.start() + match.end()) // 2
                del self.buffer[:headers_end]

                if "content-disposition" not in headers:
                    raise ValueError("Missing Content-Disposition header")

                disposition, extra = parse_options_header(
                    headers["content-disposition"]
                )
                name = t.cast(str, extra.get("name"))
                filename = extra.get("filename")
                # A filename parameter distinguishes a File from a Field.
                if filename is not None:
                    event = File(
                        filename=filename,
                        headers=headers,
                        name=name,
                    )
                else:
                    event = Field(
                        headers=headers,
                        name=name,
                    )
                self.state = State.DATA_START
                self._search_position = 0
                self._parts_decoded += 1

                if self.max_parts is not None and self._parts_decoded > self.max_parts:
                    raise RequestEntityTooLarge()
            else:
                # Update the search start position to be equal to the
                # current buffer length (already searched) minus a
                # safe buffer for part of the search target.
                self._search_position = max(0, len(self.buffer) - SEARCH_EXTRA_LENGTH)

        elif self.state == State.DATA_START:
            data, del_index, more_data = self._parse_data(self.buffer, start=True)
            del self.buffer[:del_index]
            event = Data(data=data, more_data=more_data)
            if more_data:
                self.state = State.DATA

        elif self.state == State.DATA:
            data, del_index, more_data = self._parse_data(self.buffer, start=False)
            del self.buffer[:del_index]
            # Suppress empty intermediate chunks; the final chunk is
            # always emitted so the caller sees more_data=False.
            if data or not more_data:
                event = Data(data=data, more_data=more_data)

        elif self.state == State.EPILOGUE and self.complete:
            event = Epilogue(data=bytes(self.buffer))
            del self.buffer[:]
            self.state = State.COMPLETE

        if self.complete and isinstance(event, NeedData):
            raise ValueError(f"Invalid form-data cannot parse beyond {self.state}")

        return event

    def _parse_headers(self, data: bytes) -> Headers:
        """Parse the raw header bytes of one part into ``Headers``."""
        headers: list[tuple[str, str]] = []
        # Merge the continued headers into one line
        data = HEADER_CONTINUATION_RE.sub(b" ", data)
        # Now there is one header per line
        for line in data.splitlines():
            line = line.strip()

            if line != b"":
                name, _, value = line.decode().partition(":")
                headers.append((name.strip(), value.strip()))
        return Headers(headers)

    def _parse_data(self, data: bytes, *, start: bool) -> tuple[bytes, int, bool]:
        """Extract body data up to (but excluding) the next boundary.

        Returns ``(data, del_index, more_data)`` where ``del_index`` is
        how many bytes to drop from the buffer and ``more_data`` is True
        while this part's closing boundary has not been seen yet.
        """
        # Body parts must start with CRLF (or CR or LF)
        if start:
            match = LINE_BREAK_RE.match(data)
            data_start = t.cast(t.Match[bytes], match).end()
        else:
            data_start = 0

        boundary = b"--" + self.boundary

        if self.buffer.find(boundary) == -1:
            # No complete boundary in the buffer, but there may be
            # a partial boundary at the end. As the boundary
            # starts with either a nl or cr find the earliest and
            # return up to that as data.
            data_end = del_index = self.last_newline(data[data_start:]) + data_start
            # If amount of data after last newline is far from
            # possible length of partial boundary, we should
            # assume that there is no partial boundary in the buffer
            # and return all pending data.
            if (len(data) - data_end) > len(b"\n" + boundary):
                data_end = del_index = len(data)
            more_data = True
        else:
            match = self.boundary_re.search(data)
            if match is not None:
                # A "--" suffix marks the closing boundary of the message.
                if match.group(1).startswith(b"--"):
                    self.state = State.EPILOGUE
                else:
                    self.state = State.PART
                data_end = match.start()
                del_index = match.end()
            else:
                data_end = del_index = self.last_newline(data[data_start:]) + data_start
            more_data = match is None

        return bytes(data[data_start:data_end]), del_index, more_data
+
+
class MultipartEncoder:
    """Encodes Python events back into a multipart bytestream.

    The mirror image of :class:`MultipartDecoder`: feed it the same
    event sequence and it emits the wire format.
    """

    def __init__(self, boundary: bytes) -> None:
        # Boundary without the leading "--"; send_event adds it.
        self.boundary = boundary
        self.state = State.PREAMBLE

    def send_event(self, event: Event) -> bytes:
        """Return the encoded bytes for ``event``.

        :raises ValueError: if ``event`` is not valid in the current
            encoder state.
        """
        if isinstance(event, Preamble) and self.state == State.PREAMBLE:
            self.state = State.PART
            return event.data
        elif isinstance(event, (Field, File)) and self.state in {
            State.PREAMBLE,
            State.PART,
            State.DATA,
        }:
            # Start a new part: boundary line followed by the headers.
            data = b"\r\n--" + self.boundary + b"\r\n"
            data += b'Content-Disposition: form-data; name="%s"' % event.name.encode()
            if isinstance(event, File):
                data += b'; filename="%s"' % event.filename.encode()
            data += b"\r\n"
            for name, value in t.cast(Field, event).headers:
                if name.lower() != "content-disposition":
                    data += f"{name}: {value}\r\n".encode()
            self.state = State.DATA_START
            return data
        elif isinstance(event, Data) and self.state == State.DATA_START:
            self.state = State.DATA
            # A non-empty first chunk is prefixed with the blank line
            # that terminates the part headers.
            if len(event.data) > 0:
                return b"\r\n" + event.data
            else:
                return event.data
        elif isinstance(event, Data) and self.state == State.DATA:
            return event.data
        elif isinstance(event, Epilogue):
            self.state = State.COMPLETE
            return b"\r\n--" + self.boundary + b"--\r\n" + event.data
        else:
            raise ValueError(f"Cannot generate {event} in state: {self.state}")
diff --git a/venv/lib/python3.8/site-packages/werkzeug/sansio/request.py b/venv/lib/python3.8/site-packages/werkzeug/sansio/request.py
new file mode 100644
index 0000000..dd0805d
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/sansio/request.py
@@ -0,0 +1,536 @@
+from __future__ import annotations
+
+import typing as t
+from datetime import datetime
+from urllib.parse import parse_qsl
+
+from ..datastructures import Accept
+from ..datastructures import Authorization
+from ..datastructures import CharsetAccept
+from ..datastructures import ETags
+from ..datastructures import Headers
+from ..datastructures import HeaderSet
+from ..datastructures import IfRange
+from ..datastructures import ImmutableList
+from ..datastructures import ImmutableMultiDict
+from ..datastructures import LanguageAccept
+from ..datastructures import MIMEAccept
+from ..datastructures import MultiDict
+from ..datastructures import Range
+from ..datastructures import RequestCacheControl
+from ..http import parse_accept_header
+from ..http import parse_cache_control_header
+from ..http import parse_date
+from ..http import parse_etags
+from ..http import parse_if_range_header
+from ..http import parse_list_header
+from ..http import parse_options_header
+from ..http import parse_range_header
+from ..http import parse_set_header
+from ..user_agent import UserAgent
+from ..utils import cached_property
+from ..utils import header_property
+from .http import parse_cookie
+from .utils import get_content_length
+from .utils import get_current_url
+from .utils import get_host
+
+
class Request:
    """Represents the non-IO parts of a HTTP request, including the
    method, URL info, and headers.

    This class is not meant for general use. It should only be used when
    implementing WSGI, ASGI, or another HTTP application spec. Werkzeug
    provides a WSGI implementation at :cls:`werkzeug.wrappers.Request`.

    :param method: The method the request was made with, such as
        ``GET``.
    :param scheme: The URL scheme of the protocol the request used, such
        as ``https`` or ``wss``.
    :param server: The address of the server. ``(host, port)``,
        ``(path, None)`` for unix sockets, or ``None`` if not known.
    :param root_path: The prefix that the application is mounted under.
        This is prepended to generated URLs, but is not part of route
        matching.
    :param path: The path part of the URL after ``root_path``.
    :param query_string: The part of the URL after the "?".
    :param headers: The headers received with the request.
    :param remote_addr: The address of the client sending the request.

    .. versionchanged:: 3.0
        The ``charset``, ``url_charset``, and ``encoding_errors`` attributes
        were removed.

    .. versionadded:: 2.0
    """

    #: the class to use for `args` and `form`. The default is an
    #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
    #: multiple values per key. alternatively it makes sense to use an
    #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
    #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
    #: which is the fastest but only remembers the last key. It is also
    #: possible to use mutable structures, but this is not recommended.
    #:
    #: .. versionadded:: 0.6
    parameter_storage_class: type[MultiDict[str, t.Any]] = ImmutableMultiDict

    #: The type to be used for dict values from the incoming WSGI
    #: environment. (For example for :attr:`cookies`.) By default an
    #: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used.
    #:
    #: .. versionchanged:: 1.0.0
    #:     Changed to ``ImmutableMultiDict`` to support multiple values.
    #:
    #: .. versionadded:: 0.6
    dict_storage_class: type[MultiDict[str, t.Any]] = ImmutableMultiDict

    #: the type to be used for list values from the incoming WSGI environment.
    #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
    #: (for example for :attr:`access_list`).
    #:
    #: .. versionadded:: 0.6
    list_storage_class: type[list[t.Any]] = ImmutableList

    user_agent_class: type[UserAgent] = UserAgent
    """The class used and returned by the :attr:`user_agent` property to
    parse the header. Defaults to
    :class:`~werkzeug.user_agent.UserAgent`, which does no parsing. An
    extension can provide a subclass that uses a parser to provide other
    data.

    .. versionadded:: 2.0
    """

    #: Valid host names when handling requests. By default all hosts are
    #: trusted, which means that whatever the client says the host is
    #: will be accepted.
    #:
    #: Because ``Host`` and ``X-Forwarded-Host`` headers can be set to
    #: any value by a malicious client, it is recommended to either set
    #: this property or implement similar validation in the proxy (if
    #: the application is being run behind one).
    #:
    #: .. versionadded:: 0.9
    trusted_hosts: list[str] | None = None

    def __init__(
        self,
        method: str,
        scheme: str,
        server: tuple[str, int | None] | None,
        root_path: str,
        path: str,
        query_string: bytes,
        headers: Headers,
        remote_addr: str | None,
    ) -> None:
        #: The method the request was made with, such as ``GET``.
        self.method = method.upper()
        #: The URL scheme of the protocol the request used, such as
        #: ``https`` or ``wss``.
        self.scheme = scheme
        #: The address of the server. ``(host, port)``, ``(path, None)``
        #: for unix sockets, or ``None`` if not known.
        self.server = server
        #: The prefix that the application is mounted under, without a
        #: trailing slash. :attr:`path` comes after this.
        self.root_path = root_path.rstrip("/")
        #: The path part of the URL after :attr:`root_path`. This is the
        #: path used for routing within the application.
        self.path = "/" + path.lstrip("/")
        #: The part of the URL after the "?". This is the raw value, use
        #: :attr:`args` for the parsed values.
        self.query_string = query_string
        #: The headers received with the request.
        self.headers = headers
        #: The address of the client sending the request.
        self.remote_addr = remote_addr

    def __repr__(self) -> str:
        try:
            url = self.url
        except Exception as e:
            # Building the URL may fail (e.g. an untrusted host); still
            # produce a usable repr for debugging.
            url = f"(invalid URL: {e})"

        return f"<{type(self).__name__} {url!r} [{self.method}]>"

    @cached_property
    def args(self) -> MultiDict[str, str]:
        """The parsed URL parameters (the part in the URL after the question
        mark).

        By default an
        :class:`~werkzeug.datastructures.ImmutableMultiDict`
        is returned from this function. This can be changed by setting
        :attr:`parameter_storage_class` to a different type. This might
        be necessary if the order of the form data is important.

        .. versionchanged:: 2.3
            Invalid bytes remain percent encoded.
        """
        # "werkzeug.url_quote" is a custom codecs error handler —
        # presumably registered elsewhere in werkzeug — that keeps
        # undecodable bytes percent-encoded instead of raising.
        return self.parameter_storage_class(
            parse_qsl(
                self.query_string.decode(),
                keep_blank_values=True,
                errors="werkzeug.url_quote",
            )
        )

    @cached_property
    def access_route(self) -> list[str]:
        """If a forwarded header exists this is a list of all ip addresses
        from the client ip to the last proxy server.
        """
        if "X-Forwarded-For" in self.headers:
            return self.list_storage_class(
                parse_list_header(self.headers["X-Forwarded-For"])
            )
        elif self.remote_addr is not None:
            return self.list_storage_class([self.remote_addr])
        # No forwarding info and no known peer address.
        return self.list_storage_class()

    @cached_property
    def full_path(self) -> str:
        """Requested path, including the query string."""
        return f"{self.path}?{self.query_string.decode()}"

    @property
    def is_secure(self) -> bool:
        """``True`` if the request was made with a secure protocol
        (HTTPS or WSS).
        """
        return self.scheme in {"https", "wss"}

    @cached_property
    def url(self) -> str:
        """The full request URL with the scheme, host, root path, path,
        and query string."""
        return get_current_url(
            self.scheme, self.host, self.root_path, self.path, self.query_string
        )

    @cached_property
    def base_url(self) -> str:
        """Like :attr:`url` but without the query string."""
        return get_current_url(self.scheme, self.host, self.root_path, self.path)

    @cached_property
    def root_url(self) -> str:
        """The request URL scheme, host, and root path. This is the root
        that the application is accessed from.
        """
        return get_current_url(self.scheme, self.host, self.root_path)

    @cached_property
    def host_url(self) -> str:
        """The request URL scheme and host only."""
        return get_current_url(self.scheme, self.host)

    @cached_property
    def host(self) -> str:
        """The host name the request was made to, including the port if
        it's non-standard. Validated with :attr:`trusted_hosts`.
        """
        return get_host(
            self.scheme, self.headers.get("host"), self.server, self.trusted_hosts
        )

    @cached_property
    def cookies(self) -> ImmutableMultiDict[str, str]:
        """A :class:`dict` with the contents of all cookies transmitted with
        the request."""
        # Multiple Cookie headers are joined before parsing.
        wsgi_combined_cookie = ";".join(self.headers.getlist("Cookie"))
        return parse_cookie(  # type: ignore
            wsgi_combined_cookie, cls=self.dict_storage_class
        )

    # Common Descriptors

    content_type = header_property[str](
        "Content-Type",
        doc="""The Content-Type entity-header field indicates the media
        type of the entity-body sent to the recipient or, in the case of
        the HEAD method, the media type that would have been sent had
        the request been a GET.""",
        read_only=True,
    )

    @cached_property
    def content_length(self) -> int | None:
        """The Content-Length entity-header field indicates the size of the
        entity-body in bytes or, in the case of the HEAD method, the size of
        the entity-body that would have been sent had the request been a
        GET.
        """
        # Transfer-Encoding is passed through so chunked requests can be
        # recognized by the helper.
        return get_content_length(
            http_content_length=self.headers.get("Content-Length"),
            http_transfer_encoding=self.headers.get("Transfer-Encoding"),
        )

    content_encoding = header_property[str](
        "Content-Encoding",
        doc="""The Content-Encoding entity-header field is used as a
        modifier to the media-type. When present, its value indicates
        what additional content codings have been applied to the
        entity-body, and thus what decoding mechanisms must be applied
        in order to obtain the media-type referenced by the Content-Type
        header field.

        .. versionadded:: 0.9""",
        read_only=True,
    )
    content_md5 = header_property[str](
        "Content-MD5",
        doc="""The Content-MD5 entity-header field, as defined in
        RFC 1864, is an MD5 digest of the entity-body for the purpose of
        providing an end-to-end message integrity check (MIC) of the
        entity-body. (Note: a MIC is good for detecting accidental
        modification of the entity-body in transit, but is not proof
        against malicious attacks.)

        .. versionadded:: 0.9""",
        read_only=True,
    )
    referrer = header_property[str](
        "Referer",
        doc="""The Referer[sic] request-header field allows the client
        to specify, for the server's benefit, the address (URI) of the
        resource from which the Request-URI was obtained (the
        "referrer", although the header field is misspelled).""",
        read_only=True,
    )
    date = header_property(
        "Date",
        None,
        parse_date,
        doc="""The Date general-header field represents the date and
        time at which the message was originated, having the same
        semantics as orig-date in RFC 822.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
        read_only=True,
    )
    max_forwards = header_property(
        "Max-Forwards",
        None,
        int,
        doc="""The Max-Forwards request-header field provides a
        mechanism with the TRACE and OPTIONS methods to limit the number
        of proxies or gateways that can forward the request to the next
        inbound server.""",
        read_only=True,
    )

    def _parse_content_type(self) -> None:
        # Parse lazily and cache the result; ``mimetype`` and
        # ``mimetype_params`` both read ``_parsed_content_type``.
        if not hasattr(self, "_parsed_content_type"):
            self._parsed_content_type = parse_options_header(
                self.headers.get("Content-Type", "")
            )

    @property
    def mimetype(self) -> str:
        """Like :attr:`content_type`, but without parameters (eg, without
        charset, type etc.) and always lowercase. For example if the content
        type is ``text/HTML; charset=utf-8`` the mimetype would be
        ``'text/html'``.
        """
        self._parse_content_type()
        return self._parsed_content_type[0].lower()

    @property
    def mimetype_params(self) -> dict[str, str]:
        """The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    @cached_property
    def pragma(self) -> HeaderSet:
        """The Pragma general-header field is used to include
        implementation-specific directives that might apply to any recipient
        along the request/response chain. All pragma directives specify
        optional behavior from the viewpoint of the protocol; however, some
        systems MAY require that behavior be consistent with the directives.
        """
        return parse_set_header(self.headers.get("Pragma", ""))

    # Accept

    @cached_property
    def accept_mimetypes(self) -> MIMEAccept:
        """List of mimetypes this client supports as
        :class:`~werkzeug.datastructures.MIMEAccept` object.
        """
        return parse_accept_header(self.headers.get("Accept"), MIMEAccept)

    @cached_property
    def accept_charsets(self) -> CharsetAccept:
        """List of charsets this client supports as
        :class:`~werkzeug.datastructures.CharsetAccept` object.
        """
        return parse_accept_header(self.headers.get("Accept-Charset"), CharsetAccept)

    @cached_property
    def accept_encodings(self) -> Accept:
        """List of encodings this client accepts. Encodings in a HTTP term
        are compression encodings such as gzip. For charsets have a look at
        :attr:`accept_charset`.
        """
        return parse_accept_header(self.headers.get("Accept-Encoding"))

    @cached_property
    def accept_languages(self) -> LanguageAccept:
        """List of languages this client accepts as
        :class:`~werkzeug.datastructures.LanguageAccept` object.

        .. versionchanged 0.5
           In previous versions this was a regular
           :class:`~werkzeug.datastructures.Accept` object.
        """
        return parse_accept_header(self.headers.get("Accept-Language"), LanguageAccept)

    # ETag

    @cached_property
    def cache_control(self) -> RequestCacheControl:
        """A :class:`~werkzeug.datastructures.RequestCacheControl` object
        for the incoming cache control headers.
        """
        cache_control = self.headers.get("Cache-Control")
        return parse_cache_control_header(cache_control, None, RequestCacheControl)

    @cached_property
    def if_match(self) -> ETags:
        """An object containing all the etags in the `If-Match` header.

        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.headers.get("If-Match"))

    @cached_property
    def if_none_match(self) -> ETags:
        """An object containing all the etags in the `If-None-Match` header.

        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.headers.get("If-None-Match"))

    @cached_property
    def if_modified_since(self) -> datetime | None:
        """The parsed `If-Modified-Since` header as a datetime object.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """
        return parse_date(self.headers.get("If-Modified-Since"))

    @cached_property
    def if_unmodified_since(self) -> datetime | None:
        """The parsed `If-Unmodified-Since` header as a datetime object.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """
        return parse_date(self.headers.get("If-Unmodified-Since"))

    @cached_property
    def if_range(self) -> IfRange:
        """The parsed ``If-Range`` header.

        .. versionchanged:: 2.0
            ``IfRange.date`` is timezone-aware.

        .. versionadded:: 0.7
        """
        return parse_if_range_header(self.headers.get("If-Range"))

    @cached_property
    def range(self) -> Range | None:
        """The parsed `Range` header.

        .. versionadded:: 0.7

        :rtype: :class:`~werkzeug.datastructures.Range`
        """
        return parse_range_header(self.headers.get("Range"))

    # User Agent

    @cached_property
    def user_agent(self) -> UserAgent:
        """The user agent. Use ``user_agent.string`` to get the header
        value. Set :attr:`user_agent_class` to a subclass of
        :class:`~werkzeug.user_agent.UserAgent` to provide parsing for
        the other properties or other extended data.

        .. versionchanged:: 2.1
            The built-in parser was removed. Set ``user_agent_class`` to a ``UserAgent``
            subclass to parse data from the string.
        """
        return self.user_agent_class(self.headers.get("User-Agent", ""))

    # Authorization

    @cached_property
    def authorization(self) -> Authorization | None:
        """The ``Authorization`` header parsed into an :class:`.Authorization` object.
        ``None`` if the header is not present.

        .. versionchanged:: 2.3
            :class:`Authorization` is no longer a ``dict``. The ``token`` attribute
            was added for auth schemes that use a token instead of parameters.
        """
        return Authorization.from_header(self.headers.get("Authorization"))

    # CORS

    origin = header_property[str](
        "Origin",
        doc=(
            "The host that the request originated from. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_origin` on"
            " the response to indicate which origins are allowed."
        ),
        read_only=True,
    )

    access_control_request_headers = header_property(
        "Access-Control-Request-Headers",
        load_func=parse_set_header,
        doc=(
            "Sent with a preflight request to indicate which headers"
            " will be sent with the cross origin request. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_headers`"
            " on the response to indicate which headers are allowed."
        ),
        read_only=True,
    )

    access_control_request_method = header_property[str](
        "Access-Control-Request-Method",
        doc=(
            "Sent with a preflight request to indicate which method"
            " will be used for the cross origin request. Set"
            " :attr:`~CORSResponseMixin.access_control_allow_methods`"
            " on the response to indicate which methods are allowed."
        ),
        read_only=True,
    )

    @property
    def is_json(self) -> bool:
        """Check if the mimetype indicates JSON data, either
        :mimetype:`application/json` or :mimetype:`application/*+json`.
        """
        mt = self.mimetype
        # ``and`` binds tighter than ``or``: exact match, or any
        # ``application/*+json`` suffix type.
        return (
            mt == "application/json"
            or mt.startswith("application/")
            and mt.endswith("+json")
        )
diff --git a/venv/lib/python3.8/site-packages/werkzeug/sansio/response.py b/venv/lib/python3.8/site-packages/werkzeug/sansio/response.py
new file mode 100644
index 0000000..9093b0a
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/sansio/response.py
@@ -0,0 +1,754 @@
+from __future__ import annotations
+
+import typing as t
+from datetime import datetime
+from datetime import timedelta
+from datetime import timezone
+from http import HTTPStatus
+
+from ..datastructures import CallbackDict
+from ..datastructures import ContentRange
+from ..datastructures import ContentSecurityPolicy
+from ..datastructures import Headers
+from ..datastructures import HeaderSet
+from ..datastructures import ResponseCacheControl
+from ..datastructures import WWWAuthenticate
+from ..http import COEP
+from ..http import COOP
+from ..http import dump_age
+from ..http import dump_cookie
+from ..http import dump_header
+from ..http import dump_options_header
+from ..http import http_date
+from ..http import HTTP_STATUS_CODES
+from ..http import parse_age
+from ..http import parse_cache_control_header
+from ..http import parse_content_range_header
+from ..http import parse_csp_header
+from ..http import parse_date
+from ..http import parse_options_header
+from ..http import parse_set_header
+from ..http import quote_etag
+from ..http import unquote_etag
+from ..utils import get_content_type
+from ..utils import header_property
+
+if t.TYPE_CHECKING:
+ from ..datastructures.cache_control import _CacheControl
+
+
+def _set_property(name: str, doc: str | None = None) -> property:
+ def fget(self: Response) -> HeaderSet:
+ def on_update(header_set: HeaderSet) -> None:
+ if not header_set and name in self.headers:
+ del self.headers[name]
+ elif header_set:
+ self.headers[name] = header_set.to_header()
+
+ return parse_set_header(self.headers.get(name), on_update)
+
+ def fset(
+ self: Response,
+ value: None | (str | dict[str, str | int] | t.Iterable[str]),
+ ) -> None:
+ if not value:
+ del self.headers[name]
+ elif isinstance(value, str):
+ self.headers[name] = value
+ else:
+ self.headers[name] = dump_header(value)
+
+ return property(fget, fset, doc=doc)
+
+
+class Response:
+ """Represents the non-IO parts of an HTTP response, specifically the
+ status and headers but not the body.
+
+ This class is not meant for general use. It should only be used when
+ implementing WSGI, ASGI, or another HTTP application spec. Werkzeug
+ provides a WSGI implementation at :cls:`werkzeug.wrappers.Response`.
+
+ :param status: The status code for the response. Either an int, in
+ which case the default status message is added, or a string in
+ the form ``{code} {message}``, like ``404 Not Found``. Defaults
+ to 200.
+ :param headers: A :class:`~werkzeug.datastructures.Headers` object,
+ or a list of ``(key, value)`` tuples that will be converted to a
+ ``Headers`` object.
+ :param mimetype: The mime type (content type without charset or
+ other parameters) of the response. If the value starts with
+ ``text/`` (or matches some other special cases), the charset
+ will be added to create the ``content_type``.
+ :param content_type: The full content type of the response.
+ Overrides building the value from ``mimetype``.
+
+ .. versionchanged:: 3.0
+ The ``charset`` attribute was removed.
+
+ .. versionadded:: 2.0
+ """
+
    #: the default status if none is provided.
    default_status = 200

    #: the default mimetype if none is provided.
    default_mimetype: str | None = "text/plain"

    #: Warn if a cookie header exceeds this size. The default, 4093, should be
    #: safely `supported by most browsers <cookie_>`_. A cookie larger than
    #: this size will still be sent, but it may be ignored or handled
    #: incorrectly by some browsers. Set to 0 to disable this check.
    #:
    #: .. versionadded:: 0.13
    #:
    #: .. _`cookie`: http://browsercookielimits.squawky.net/
    max_cookie_size = 4093

    # A :class:`Headers` object representing the response headers.
    # Assigned in ``__init__``; annotated here for type checkers.
    headers: Headers
+
+ def __init__(
+ self,
+ status: int | str | HTTPStatus | None = None,
+ headers: t.Mapping[str, str | t.Iterable[str]]
+ | t.Iterable[tuple[str, str]]
+ | None = None,
+ mimetype: str | None = None,
+ content_type: str | None = None,
+ ) -> None:
+ if isinstance(headers, Headers):
+ self.headers = headers
+ elif not headers:
+ self.headers = Headers()
+ else:
+ self.headers = Headers(headers)
+
+ if content_type is None:
+ if mimetype is None and "content-type" not in self.headers:
+ mimetype = self.default_mimetype
+ if mimetype is not None:
+ mimetype = get_content_type(mimetype, "utf-8")
+ content_type = mimetype
+ if content_type is not None:
+ self.headers["Content-Type"] = content_type
+ if status is None:
+ status = self.default_status
+ self.status = status # type: ignore
+
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} [{self.status}]>"
+
    @property
    def status_code(self) -> int:
        """The HTTP status code as a number."""
        return self._status_code

    @status_code.setter
    def status_code(self, code: int) -> None:
        # Route through the ``status`` setter so ``_status`` (the full
        # status line) stays in sync with ``_status_code``.
        self.status = code  # type: ignore
+
    @property
    def status(self) -> str:
        """The HTTP status code as a string."""
        return self._status

    @status.setter
    def status(self, value: str | int | HTTPStatus) -> None:
        # ``_clean_status`` normalizes ints, ``HTTPStatus`` members, and
        # strings into a ``(status_line, status_code)`` pair.
        self._status, self._status_code = self._clean_status(value)
+
+ def _clean_status(self, value: str | int | HTTPStatus) -> tuple[str, int]:
+ if isinstance(value, (int, HTTPStatus)):
+ status_code = int(value)
+ else:
+ value = value.strip()
+
+ if not value:
+ raise ValueError("Empty status argument")
+
+ code_str, sep, _ = value.partition(" ")
+
+ try:
+ status_code = int(code_str)
+ except ValueError:
+ # only message
+ return f"0 {value}", 0
+
+ if sep:
+ # code and message
+ return value, status_code
+
+ # only code, look up message
+ try:
+ status = f"{status_code} {HTTP_STATUS_CODES[status_code].upper()}"
+ except KeyError:
+ status = f"{status_code} UNKNOWN"
+
+ return status, status_code
+
+ def set_cookie(
+ self,
+ key: str,
+ value: str = "",
+ max_age: timedelta | int | None = None,
+ expires: str | datetime | int | float | None = None,
+ path: str | None = "/",
+ domain: str | None = None,
+ secure: bool = False,
+ httponly: bool = False,
+ samesite: str | None = None,
+ ) -> None:
+ """Sets a cookie.
+
+ A warning is raised if the size of the cookie header exceeds
+ :attr:`max_cookie_size`, but the header will still be set.
+
+ :param key: the key (name) of the cookie to be set.
+ :param value: the value of the cookie.
+ :param max_age: should be a number of seconds, or `None` (default) if
+ the cookie should last only as long as the client's
+ browser session.
+ :param expires: should be a `datetime` object or UNIX timestamp.
+ :param path: limits the cookie to a given path, per default it will
+ span the whole domain.
+ :param domain: if you want to set a cross-domain cookie. For example,
+ ``domain="example.com"`` will set a cookie that is
+ readable by the domain ``www.example.com``,
+ ``foo.example.com`` etc. Otherwise, a cookie will only
+ be readable by the domain that set it.
+ :param secure: If ``True``, the cookie will only be available
+ via HTTPS.
+ :param httponly: Disallow JavaScript access to the cookie.
+ :param samesite: Limit the scope of the cookie to only be
+ attached to requests that are "same-site".
+ """
+ self.headers.add(
+ "Set-Cookie",
+ dump_cookie(
+ key,
+ value=value,
+ max_age=max_age,
+ expires=expires,
+ path=path,
+ domain=domain,
+ secure=secure,
+ httponly=httponly,
+ max_size=self.max_cookie_size,
+ samesite=samesite,
+ ),
+ )
+
+ def delete_cookie(
+ self,
+ key: str,
+ path: str | None = "/",
+ domain: str | None = None,
+ secure: bool = False,
+ httponly: bool = False,
+ samesite: str | None = None,
+ ) -> None:
+ """Delete a cookie. Fails silently if key doesn't exist.
+
+ :param key: the key (name) of the cookie to be deleted.
+ :param path: if the cookie that should be deleted was limited to a
+ path, the path has to be defined here.
+ :param domain: if the cookie that should be deleted was limited to a
+ domain, that domain has to be defined here.
+ :param secure: If ``True``, the cookie will only be available
+ via HTTPS.
+ :param httponly: Disallow JavaScript access to the cookie.
+ :param samesite: Limit the scope of the cookie to only be
+ attached to requests that are "same-site".
+ """
+ self.set_cookie(
+ key,
+ expires=0,
+ max_age=0,
+ path=path,
+ domain=domain,
+ secure=secure,
+ httponly=httponly,
+ samesite=samesite,
+ )
+
+ @property
+ def is_json(self) -> bool:
+ """Check if the mimetype indicates JSON data, either
+ :mimetype:`application/json` or :mimetype:`application/*+json`.
+ """
+ mt = self.mimetype
+ return mt is not None and (
+ mt == "application/json"
+ or mt.startswith("application/")
+ and mt.endswith("+json")
+ )
+
+ # Common Descriptors
+
+ @property
+ def mimetype(self) -> str | None:
+ """The mimetype (content type without charset etc.)"""
+ ct = self.headers.get("content-type")
+
+ if ct:
+ return ct.split(";")[0].strip()
+ else:
+ return None
+
    @mimetype.setter
    def mimetype(self, value: str) -> None:
        # get_content_type appends the charset for text-like types.
        self.headers["Content-Type"] = get_content_type(value, "utf-8")
+
+ @property
+ def mimetype_params(self) -> dict[str, str]:
+ """The mimetype parameters as dict. For example if the
+ content type is ``text/html; charset=utf-8`` the params would be
+ ``{'charset': 'utf-8'}``.
+
+ .. versionadded:: 0.5
+ """
+
+ def on_update(d: CallbackDict[str, str]) -> None:
+ self.headers["Content-Type"] = dump_options_header(self.mimetype, d)
+
+ d = parse_options_header(self.headers.get("content-type", ""))[1]
+ return CallbackDict(d, on_update)
+
    # Descriptors for simple, single-valued response headers.

    location = header_property[str](
        "Location",
        doc="""The Location response-header field is used to redirect
        the recipient to a location other than the Request-URI for
        completion of the request or identification of a new
        resource.""",
    )
    age = header_property(
        "Age",
        None,
        parse_age,
        dump_age,  # type: ignore
        doc="""The Age response-header field conveys the sender's
        estimate of the amount of time since the response (or its
        revalidation) was generated at the origin server.

        Age values are non-negative decimal integers, representing time
        in seconds.""",
    )
    content_type = header_property[str](
        "Content-Type",
        doc="""The Content-Type entity-header field indicates the media
        type of the entity-body sent to the recipient or, in the case of
        the HEAD method, the media type that would have been sent had
        the request been a GET.""",
    )
    content_length = header_property(
        "Content-Length",
        None,
        int,
        str,
        doc="""The Content-Length entity-header field indicates the size
        of the entity-body, in decimal number of OCTETs, sent to the
        recipient or, in the case of the HEAD method, the size of the
        entity-body that would have been sent had the request been a
        GET.""",
    )
    content_location = header_property[str](
        "Content-Location",
        doc="""The Content-Location entity-header field MAY be used to
        supply the resource location for the entity enclosed in the
        message when that entity is accessible from a location separate
        from the requested resource's URI.""",
    )
    content_encoding = header_property[str](
        "Content-Encoding",
        doc="""The Content-Encoding entity-header field is used as a
        modifier to the media-type. When present, its value indicates
        what additional content codings have been applied to the
        entity-body, and thus what decoding mechanisms must be applied
        in order to obtain the media-type referenced by the Content-Type
        header field.""",
    )
    content_md5 = header_property[str](
        "Content-MD5",
        doc="""The Content-MD5 entity-header field, as defined in
        RFC 1864, is an MD5 digest of the entity-body for the purpose of
        providing an end-to-end message integrity check (MIC) of the
        entity-body. (Note: a MIC is good for detecting accidental
        modification of the entity-body in transit, but is not proof
        against malicious attacks.)""",
    )
    date = header_property(
        "Date",
        None,
        parse_date,
        http_date,
        doc="""The Date general-header field represents the date and
        time at which the message was originated, having the same
        semantics as orig-date in RFC 822.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
    )
    expires = header_property(
        "Expires",
        None,
        parse_date,
        http_date,
        doc="""The Expires entity-header field gives the date/time after
        which the response is considered stale. A stale cache entry may
        not normally be returned by a cache.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
    )
    last_modified = header_property(
        "Last-Modified",
        None,
        parse_date,
        http_date,
        doc="""The Last-Modified entity-header field indicates the date
        and time at which the origin server believes the variant was
        last modified.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
    )
+
+ @property
+ def retry_after(self) -> datetime | None:
+ """The Retry-After response-header field can be used with a
+ 503 (Service Unavailable) response to indicate how long the
+ service is expected to be unavailable to the requesting client.
+
+ Time in seconds until expiration or date.
+
+ .. versionchanged:: 2.0
+ The datetime object is timezone-aware.
+ """
+ value = self.headers.get("retry-after")
+ if value is None:
+ return None
+
+ try:
+ seconds = int(value)
+ except ValueError:
+ return parse_date(value)
+
+ return datetime.now(timezone.utc) + timedelta(seconds=seconds)
+
+ @retry_after.setter
+ def retry_after(self, value: datetime | int | str | None) -> None:
+ if value is None:
+ if "retry-after" in self.headers:
+ del self.headers["retry-after"]
+ return
+ elif isinstance(value, datetime):
+ value = http_date(value)
+ else:
+ value = str(value)
+ self.headers["Retry-After"] = value
+
    # Descriptors for set-valued headers; mutating the returned
    # ``HeaderSet`` writes back to the header via ``_set_property``.

    vary = _set_property(
        "Vary",
        doc="""The Vary field value indicates the set of request-header
        fields that fully determines, while the response is fresh,
        whether a cache is permitted to use the response to reply to a
        subsequent request without revalidation.""",
    )
    content_language = _set_property(
        "Content-Language",
        doc="""The Content-Language entity-header field describes the
        natural language(s) of the intended audience for the enclosed
        entity. Note that this might not be equivalent to all the
        languages used within the entity-body.""",
    )
    allow = _set_property(
        "Allow",
        doc="""The Allow entity-header field lists the set of methods
        supported by the resource identified by the Request-URI. The
        purpose of this field is strictly to inform the recipient of
        valid methods associated with the resource. An Allow header
        field MUST be present in a 405 (Method Not Allowed)
        response.""",
    )
+
+ # ETag
+
+ @property
+ def cache_control(self) -> ResponseCacheControl:
+ """The Cache-Control general-header field is used to specify
+ directives that MUST be obeyed by all caching mechanisms along the
+ request/response chain.
+ """
+
+ def on_update(cache_control: _CacheControl) -> None:
+ if not cache_control and "cache-control" in self.headers:
+ del self.headers["cache-control"]
+ elif cache_control:
+ self.headers["Cache-Control"] = cache_control.to_header()
+
+ return parse_cache_control_header(
+ self.headers.get("cache-control"), on_update, ResponseCacheControl
+ )
+
+ def set_etag(self, etag: str, weak: bool = False) -> None:
+ """Set the etag, and override the old one if there was one."""
+ self.headers["ETag"] = quote_etag(etag, weak)
+
+ def get_etag(self) -> tuple[str, bool] | tuple[None, None]:
+ """Return a tuple in the form ``(etag, is_weak)``. If there is no
+ ETag the return value is ``(None, None)``.
+ """
+ return unquote_etag(self.headers.get("ETag"))
+
    # Plain string header; despite the plural name the value is a single
    # token such as "bytes" or "none".
    accept_ranges = header_property[str](
        "Accept-Ranges",
        doc="""The `Accept-Ranges` header. Even though the name would
        indicate that multiple values are supported, it must be one
        string token only.

        The values ``'bytes'`` and ``'none'`` are common.

        .. versionadded:: 0.7""",
    )
+
+ @property
+ def content_range(self) -> ContentRange:
+ """The ``Content-Range`` header as a
+ :class:`~werkzeug.datastructures.ContentRange` object. Available
+ even if the header is not set.
+
+ .. versionadded:: 0.7
+ """
+
+ def on_update(rng: ContentRange) -> None:
+ if not rng:
+ del self.headers["content-range"]
+ else:
+ self.headers["Content-Range"] = rng.to_header()
+
+ rv = parse_content_range_header(self.headers.get("content-range"), on_update)
+ # always provide a content range object to make the descriptor
+ # more user friendly. It provides an unset() method that can be
+ # used to remove the header quickly.
+ if rv is None:
+ rv = ContentRange(None, None, None, on_update=on_update)
+ return rv
+
+ @content_range.setter
+ def content_range(self, value: ContentRange | str | None) -> None:
+ if not value:
+ del self.headers["content-range"]
+ elif isinstance(value, str):
+ self.headers["Content-Range"] = value
+ else:
+ self.headers["Content-Range"] = value.to_header()
+
+ # Authorization
+
+ @property
+ def www_authenticate(self) -> WWWAuthenticate:
+ """The ``WWW-Authenticate`` header parsed into a :class:`.WWWAuthenticate`
+ object. Modifying the object will modify the header value.
+
+ This header is not set by default. To set this header, assign an instance of
+ :class:`.WWWAuthenticate` to this attribute.
+
+ .. code-block:: python
+
+ response.www_authenticate = WWWAuthenticate(
+ "basic", {"realm": "Authentication Required"}
+ )
+
+ Multiple values for this header can be sent to give the client multiple options.
+ Assign a list to set multiple headers. However, modifying the items in the list
+ will not automatically update the header values, and accessing this attribute
+ will only ever return the first value.
+
+ To unset this header, assign ``None`` or use ``del``.
+
+ .. versionchanged:: 2.3
+ This attribute can be assigned to to set the header. A list can be assigned
+ to set multiple header values. Use ``del`` to unset the header.
+
+ .. versionchanged:: 2.3
+ :class:`WWWAuthenticate` is no longer a ``dict``. The ``token`` attribute
+ was added for auth challenges that use a token instead of parameters.
+ """
+ value = WWWAuthenticate.from_header(self.headers.get("WWW-Authenticate"))
+
+ if value is None:
+ value = WWWAuthenticate("basic")
+
+ def on_update(value: WWWAuthenticate) -> None:
+ self.www_authenticate = value
+
+ value._on_update = on_update
+ return value
+
+ @www_authenticate.setter
+ def www_authenticate(
+ self, value: WWWAuthenticate | list[WWWAuthenticate] | None
+ ) -> None:
+ if not value: # None or empty list
+ del self.www_authenticate
+ elif isinstance(value, list):
+ # Clear any existing header by setting the first item.
+ self.headers.set("WWW-Authenticate", value[0].to_header())
+
+ for item in value[1:]:
+ # Add additional header lines for additional items.
+ self.headers.add("WWW-Authenticate", item.to_header())
+ else:
+ self.headers.set("WWW-Authenticate", value.to_header())
+
+ def on_update(value: WWWAuthenticate) -> None:
+ self.www_authenticate = value
+
+ # When setting a single value, allow updating it directly.
+ value._on_update = on_update
+
+ @www_authenticate.deleter
+ def www_authenticate(self) -> None:
+ if "WWW-Authenticate" in self.headers:
+ del self.headers["WWW-Authenticate"]
+
+ # CSP
+
+ @property
+ def content_security_policy(self) -> ContentSecurityPolicy:
+ """The ``Content-Security-Policy`` header as a
+ :class:`~werkzeug.datastructures.ContentSecurityPolicy` object. Available
+ even if the header is not set.
+
+ The Content-Security-Policy header adds an additional layer of
+ security to help detect and mitigate certain types of attacks.
+ """
+
+ def on_update(csp: ContentSecurityPolicy) -> None:
+ if not csp:
+ del self.headers["content-security-policy"]
+ else:
+ self.headers["Content-Security-Policy"] = csp.to_header()
+
+ rv = parse_csp_header(self.headers.get("content-security-policy"), on_update)
+ if rv is None:
+ rv = ContentSecurityPolicy(None, on_update=on_update)
+ return rv
+
+ @content_security_policy.setter
+ def content_security_policy(
+ self, value: ContentSecurityPolicy | str | None
+ ) -> None:
+ if not value:
+ del self.headers["content-security-policy"]
+ elif isinstance(value, str):
+ self.headers["Content-Security-Policy"] = value
+ else:
+ self.headers["Content-Security-Policy"] = value.to_header()
+
+ @property
+ def content_security_policy_report_only(self) -> ContentSecurityPolicy:
+ """The ``Content-Security-policy-report-only`` header as a
+ :class:`~werkzeug.datastructures.ContentSecurityPolicy` object. Available
+ even if the header is not set.
+
+ The Content-Security-Policy-Report-Only header adds a csp policy
+ that is not enforced but is reported thereby helping detect
+ certain types of attacks.
+ """
+
+ def on_update(csp: ContentSecurityPolicy) -> None:
+ if not csp:
+ del self.headers["content-security-policy-report-only"]
+ else:
+ self.headers["Content-Security-policy-report-only"] = csp.to_header()
+
+ rv = parse_csp_header(
+ self.headers.get("content-security-policy-report-only"), on_update
+ )
+ if rv is None:
+ rv = ContentSecurityPolicy(None, on_update=on_update)
+ return rv
+
+ @content_security_policy_report_only.setter
+ def content_security_policy_report_only(
+ self, value: ContentSecurityPolicy | str | None
+ ) -> None:
+ if not value:
+ del self.headers["content-security-policy-report-only"]
+ elif isinstance(value, str):
+ self.headers["Content-Security-policy-report-only"] = value
+ else:
+ self.headers["Content-Security-policy-report-only"] = value.to_header()
+
+ # CORS
+
+ @property
+ def access_control_allow_credentials(self) -> bool:
+ """Whether credentials can be shared by the browser to
+ JavaScript code. As part of the preflight request it indicates
+ whether credentials can be used on the cross origin request.
+ """
+ return "Access-Control-Allow-Credentials" in self.headers
+
+ @access_control_allow_credentials.setter
+ def access_control_allow_credentials(self, value: bool | None) -> None:
+ if value is True:
+ self.headers["Access-Control-Allow-Credentials"] = "true"
+ else:
+ self.headers.pop("Access-Control-Allow-Credentials", None)
+
    # CORS response headers exposed as typed properties. The set-valued ones
    # round-trip through parse_set_header/dump_header.
    access_control_allow_headers = header_property(
        "Access-Control-Allow-Headers",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which headers can be sent with the cross origin request.",
    )

    access_control_allow_methods = header_property(
        "Access-Control-Allow-Methods",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which methods can be used for the cross origin request.",
    )

    access_control_allow_origin = header_property[str](
        "Access-Control-Allow-Origin",
        doc="The origin or '*' for any origin that may make cross origin requests.",
    )

    access_control_expose_headers = header_property(
        "Access-Control-Expose-Headers",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which headers can be shared by the browser to JavaScript code.",
    )

    # Integer-valued header; stored as its decimal string form.
    access_control_max_age = header_property(
        "Access-Control-Max-Age",
        load_func=int,
        dump_func=str,
        doc="The maximum age in seconds the access control settings can be cached for.",
    )
+
    # Enum-valued headers: the raw header string is converted to/from the
    # COOP/COEP enums, defaulting to UNSAFE_NONE when absent.
    cross_origin_opener_policy = header_property[COOP](
        "Cross-Origin-Opener-Policy",
        load_func=lambda value: COOP(value),
        dump_func=lambda value: value.value,
        default=COOP.UNSAFE_NONE,
        doc="""Allows control over sharing of browsing context group with cross-origin
        documents. Values must be a member of the :class:`werkzeug.http.COOP` enum.""",
    )

    cross_origin_embedder_policy = header_property[COEP](
        "Cross-Origin-Embedder-Policy",
        load_func=lambda value: COEP(value),
        dump_func=lambda value: value.value,
        default=COEP.UNSAFE_NONE,
        doc="""Prevents a document from loading any cross-origin resources that do not
        explicitly grant the document permission. Values must be a member of the
        :class:`werkzeug.http.COEP` enum.""",
    )
diff --git a/venv/lib/python3.8/site-packages/werkzeug/sansio/utils.py b/venv/lib/python3.8/site-packages/werkzeug/sansio/utils.py
new file mode 100644
index 0000000..14fa0ac
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/sansio/utils.py
@@ -0,0 +1,159 @@
+from __future__ import annotations
+
+import typing as t
+from urllib.parse import quote
+
+from .._internal import _plain_int
+from ..exceptions import SecurityError
+from ..urls import uri_to_iri
+
+
def host_is_trusted(hostname: str | None, trusted_list: t.Iterable[str]) -> bool:
    """Check whether ``hostname`` matches any entry in ``trusted_list``.

    :param hostname: The name to check.
    :param trusted_list: Valid names to match against. An entry that
        starts with a dot also matches every subdomain.

    .. versionadded:: 0.9
    """
    if not hostname:
        return False

    try:
        # Strip any port, then normalize to IDNA (punycode) form so Unicode
        # hostnames compare equal to their encoded equivalents.
        plain = hostname.partition(":")[0].encode("idna").decode("ascii")
    except UnicodeEncodeError:
        return False

    refs = [trusted_list] if isinstance(trusted_list, str) else trusted_list

    for entry in refs:
        suffix_match = entry.startswith(".")

        if suffix_match:
            entry = entry[1:]

        try:
            entry = entry.partition(":")[0].encode("idna").decode("ascii")
        except UnicodeEncodeError:
            return False

        if entry == plain or (suffix_match and plain.endswith(f".{entry}")):
            return True

    return False
+
+
def get_host(
    scheme: str,
    host_header: str | None,
    server: tuple[str, int | None] | None = None,
    trusted_hosts: t.Iterable[str] | None = None,
) -> str:
    """Return the host for the given parameters.

    The ``Host`` header wins when present; otherwise the host is derived
    from ``server``. The port is kept only when it differs from the
    scheme's standard port.

    :param scheme: The protocol the request used, like ``"https"``.
    :param host_header: The ``Host`` header value.
    :param server: Address of the server. ``(host, port)``, or
        ``(path, None)`` for unix sockets.
    :param trusted_hosts: A list of trusted host names.

    :return: Host, with port if necessary.
    :raise ~werkzeug.exceptions.SecurityError: If the host is not
        trusted.
    """
    if host_header is not None:
        host = host_header
    elif server is not None:
        name, port = server[0], server[1]
        host = name if port is None else f"{name}:{port}"
    else:
        host = ""

    # Drop the scheme's default port.
    if host.endswith(":80") and scheme in {"http", "ws"}:
        host = host[:-3]
    elif host.endswith(":443") and scheme in {"https", "wss"}:
        host = host[:-4]

    if trusted_hosts is not None and not host_is_trusted(host, trusted_hosts):
        raise SecurityError(f"Host {host!r} is not trusted.")

    return host
+
+
def get_current_url(
    scheme: str,
    host: str,
    root_path: str | None = None,
    path: str | None = None,
    query_string: bytes | None = None,
) -> str:
    """Recreate the URL for a request. If an optional part isn't
    provided, it and subsequent parts are not included in the URL.

    The URL is an IRI, not a URI, so it may contain Unicode characters.
    Use :func:`~werkzeug.urls.iri_to_uri` to convert it to ASCII.

    :param scheme: The protocol the request used, like ``"https"``.
    :param host: The host the request was made to. See :func:`get_host`.
    :param root_path: Prefix that the application is mounted under. This
        is prepended to ``path``.
    :param path: The path part of the URL after ``root_path``.
    :param query_string: The portion of the URL after the "?".
    """
    # Characters safe in a path segment per the WHATWG URL spec
    # (https://url.spec.whatwg.org/#url-path-segment-string), plus "%" so
    # already percent-encoded sequences pass through unchanged.
    segment_safe = "!$&'()*+,/:;=@%"
    parts = [scheme, "://", host]

    if root_path is None:
        parts.append("/")
    else:
        parts.append(quote(root_path.rstrip("/"), safe=segment_safe))
        parts.append("/")

        if path is not None:
            parts.append(quote(path.lstrip("/"), safe=segment_safe))

            if query_string:
                parts.append("?")
                # Queries may additionally contain literal "?".
                parts.append(quote(query_string, safe=segment_safe + "?"))

    return uri_to_iri("".join(parts))
+
+
def get_content_length(
    http_content_length: str | None = None,
    http_transfer_encoding: str | None = None,
) -> int | None:
    """Return the ``Content-Length`` header value as an int.

    ``None`` is returned, indicating a streaming request, when the header
    is missing or ``Transfer-Encoding`` is ``chunked``. A non-integer or
    negative value yields 0.

    :param http_content_length: The Content-Length HTTP header.
    :param http_transfer_encoding: The Transfer-Encoding HTTP header.

    .. versionadded:: 2.2
    """
    if http_content_length is None or http_transfer_encoding == "chunked":
        return None

    try:
        length = _plain_int(http_content_length)
    except ValueError:
        return 0

    return length if length > 0 else 0
diff --git a/venv/lib/python3.8/site-packages/werkzeug/security.py b/venv/lib/python3.8/site-packages/werkzeug/security.py
new file mode 100644
index 0000000..9975979
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/security.py
@@ -0,0 +1,163 @@
+from __future__ import annotations
+
+import hashlib
+import hmac
+import os
+import posixpath
+import secrets
+
# Alphabet used for generated salts: ASCII letters and digits.
SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
DEFAULT_PBKDF2_ITERATIONS = 600000

# Path separators other than "/" used by the host OS (e.g. "\\" on
# Windows); safe_join rejects any component containing one of these.
_os_alt_seps: list[str] = [
    sep for sep in (os.sep, os.path.altsep) if sep is not None and sep != "/"
]


def gen_salt(length: int) -> str:
    """Return a cryptographically random string of ``length`` characters
    drawn from ``SALT_CHARS``.
    """
    if length <= 0:
        raise ValueError("Salt length must be at least 1.")

    chars = [secrets.choice(SALT_CHARS) for _ in range(length)]
    return "".join(chars)
+
+
def _hash_internal(method: str, salt: str, password: str) -> tuple[str, str]:
    """Hash ``password`` with ``salt`` using ``method``.

    ``method`` is a spec string such as ``"scrypt"``, ``"scrypt:32768:8:1"``,
    ``"pbkdf2"``, or ``"pbkdf2:sha256:600000"``; colon-separated values
    after the name override the default parameters.

    :return: ``(hex_digest, actual_method)`` where ``actual_method`` spells
        out the parameters actually used.
    :raise ValueError: If the method name or its arguments are invalid.
    """
    method, *args = method.split(":")
    salt_bytes = salt.encode()
    password_bytes = password.encode()

    if method == "scrypt":
        if not args:
            # Default cost parameters: n=32768, r=8, p=1.
            n = 2**15
            r = 8
            p = 1
        else:
            try:
                n, r, p = map(int, args)
            except ValueError:
                raise ValueError("'scrypt' takes 3 arguments.") from None

        maxmem = 132 * n * r * p  # ideally 128, but some extra seems needed
        return (
            hashlib.scrypt(
                password_bytes, salt=salt_bytes, n=n, r=r, p=p, maxmem=maxmem
            ).hex(),
            f"scrypt:{n}:{r}:{p}",
        )
    elif method == "pbkdf2":
        len_args = len(args)

        if len_args == 0:
            # Defaults: sha256 with DEFAULT_PBKDF2_ITERATIONS rounds.
            hash_name = "sha256"
            iterations = DEFAULT_PBKDF2_ITERATIONS
        elif len_args == 1:
            hash_name = args[0]
            iterations = DEFAULT_PBKDF2_ITERATIONS
        elif len_args == 2:
            hash_name = args[0]
            iterations = int(args[1])
        else:
            raise ValueError("'pbkdf2' takes 2 arguments.")

        return (
            hashlib.pbkdf2_hmac(
                hash_name, password_bytes, salt_bytes, iterations
            ).hex(),
            f"pbkdf2:{hash_name}:{iterations}",
        )
    else:
        raise ValueError(f"Invalid hash method '{method}'.")
+
+
def generate_password_hash(
    password: str, method: str = "scrypt", salt_length: int = 16
) -> str:
    """Securely hash a password for storage. A password can be compared to a stored
    hash using :func:`check_password_hash`.

    The following methods are supported:

    - ``scrypt``, the default. The parameters are ``n``, ``r``, and ``p``, the default
      is ``scrypt:32768:8:1``. See :func:`hashlib.scrypt`.
    - ``pbkdf2``, less secure. The parameters are ``hash_method`` and ``iterations``,
      the default is ``pbkdf2:sha256:600000``. See :func:`hashlib.pbkdf2_hmac`.

    Default parameters may be updated to reflect current guidelines, and methods may be
    deprecated and removed if they are no longer considered secure. To migrate old
    hashes, you may generate a new hash when checking an old hash, or you may contact
    users with a link to reset their password.

    :param password: The plaintext password.
    :param method: The key derivation function and parameters.
    :param salt_length: The number of characters to generate for the salt.

    .. versionchanged:: 2.3
        Scrypt support was added.

    .. versionchanged:: 2.3
        The default iterations for pbkdf2 was increased to 600,000.

    .. versionchanged:: 2.3
        All plain hashes are deprecated and will not be supported in Werkzeug 3.0.
    """
    salt = gen_salt(salt_length)
    hashval, actual_method = _hash_internal(method, salt, password)
    # Stored format is "method$salt$hash".
    return f"{actual_method}${salt}${hashval}"
+
+
def check_password_hash(pwhash: str, password: str) -> bool:
    """Securely check that the given stored password hash, previously generated using
    :func:`generate_password_hash`, matches the given password.

    Methods may be deprecated and removed if they are no longer considered secure. To
    migrate old hashes, you may generate a new hash when checking an old hash, or you
    may contact users with a link to reset their password.

    :param pwhash: The hashed password.
    :param password: The plaintext password.

    .. versionchanged:: 2.3
        All plain hashes are deprecated and will not be supported in Werkzeug 3.0.
    """
    parts = pwhash.split("$", 2)

    if len(parts) != 3:
        # Not in "method$salt$hash" form; cannot possibly match.
        return False

    method, salt, hashval = parts
    # Constant-time comparison to avoid leaking hash prefixes.
    return hmac.compare_digest(_hash_internal(method, salt, password)[0], hashval)
+
+
def safe_join(directory: str, *pathnames: str) -> str | None:
    """Safely join zero or more untrusted path components to a base
    directory, refusing any component that could escape it.

    :param directory: The trusted base directory.
    :param pathnames: The untrusted path components relative to the
        base directory.
    :return: A safe path, otherwise ``None``.
    """
    # With directory="" the result must stay anchored at "./" so the first
    # untrusted component cannot become the trusted base.
    parts = [directory if directory else "."]

    for name in pathnames:
        if name != "":
            name = posixpath.normpath(name)

            unsafe = (
                any(sep in name for sep in _os_alt_seps)
                or os.path.isabs(name)
                # ntpath.isabs doesn't catch a bare "/" on Python < 3.11.
                or name.startswith("/")
                or name == ".."
                or name.startswith("../")
            )

            if unsafe:
                return None

        parts.append(name)

    return posixpath.join(*parts)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/serving.py b/venv/lib/python3.8/site-packages/werkzeug/serving.py
new file mode 100644
index 0000000..ef32b88
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/serving.py
@@ -0,0 +1,1125 @@
+"""A WSGI and HTTP server for use **during development only**. This
+server is convenient to use, but is not designed to be particularly
+stable, secure, or efficient. Use a dedicate WSGI server and HTTP
+server when deploying to production.
+
+It provides features like interactive debugging and code reloading. Use
+``run_simple`` to start the server. Put this in a ``run.py`` script:
+
+.. code-block:: python
+
+ from myapp import create_app
+ from werkzeug import run_simple
+"""
+
+from __future__ import annotations
+
+import errno
+import io
+import os
+import selectors
+import socket
+import socketserver
+import sys
+import typing as t
+from datetime import datetime as dt
+from datetime import timedelta
+from datetime import timezone
+from http.server import BaseHTTPRequestHandler
+from http.server import HTTPServer
+from urllib.parse import unquote
+from urllib.parse import urlsplit
+
+from ._internal import _log
+from ._internal import _wsgi_encoding_dance
+from .exceptions import InternalServerError
+from .urls import uri_to_iri
+
try:
    import ssl

    # Exceptions that mean the client went away mid-request; these are
    # handled quietly instead of being logged as application errors.
    connection_dropped_errors: tuple[type[Exception], ...] = (
        ConnectionError,
        socket.timeout,
        ssl.SSLEOFError,
    )
except ImportError:

    class _SslDummy:
        """Stand-in for the ssl module that raises a helpful error on any
        attribute access when Python was built without SSL/TLS support.
        """

        def __getattr__(self, name: str) -> t.Any:
            raise RuntimeError(  # noqa: B904
                "SSL is unavailable because this Python runtime was not"
                " compiled with SSL/TLS support."
            )

    ssl = _SslDummy()  # type: ignore
    connection_dropped_errors = (ConnectionError, socket.timeout)

# Colored log output; disabled on Windows unless colorama is installed.
_log_add_style = True

if os.name == "nt":
    try:
        __import__("colorama")
    except ImportError:
        _log_add_style = False

# os.fork is unavailable on some platforms (e.g. Windows); provide a
# no-op stand-in for ForkingMixIn there.
can_fork = hasattr(os, "fork")

if can_fork:
    ForkingMixIn = socketserver.ForkingMixIn
else:

    class ForkingMixIn:  # type: ignore
        pass


# AF_UNIX is likewise platform-dependent.
try:
    af_unix = socket.AF_UNIX
except AttributeError:
    af_unix = None  # type: ignore

# Backlog passed to socket.listen().
LISTEN_QUEUE = 128

# Accepted forms of the ssl_context argument: a context, a (cert, key)
# tuple, or the literal "adhoc".
_TSSLContextArg = t.Optional[
    t.Union["ssl.SSLContext", t.Tuple[str, t.Optional[str]], t.Literal["adhoc"]]
]

if t.TYPE_CHECKING:
    from _typeshed.wsgi import WSGIApplication
    from _typeshed.wsgi import WSGIEnvironment
    from cryptography.hazmat.primitives.asymmetric.rsa import (
        RSAPrivateKeyWithSerialization,
    )
    from cryptography.x509 import Certificate
+
+
class DechunkedInput(io.RawIOBase):
    """Readable raw stream that decodes ``Transfer-Encoding: chunked``
    data from an underlying binary file object.
    """

    def __init__(self, rfile: t.IO[bytes]) -> None:
        self._rfile = rfile
        self._done = False  # becomes True once the 0-sized final chunk is seen
        self._len = 0  # bytes still unread in the current chunk

    def readable(self) -> bool:
        return True

    def read_chunk_len(self) -> int:
        """Read and parse one hexadecimal chunk-size line.

        :raise OSError: If the line is not valid hex or is negative.
        """
        line = self._rfile.readline().decode("latin1")

        try:
            size = int(line.strip(), 16)
        except ValueError as e:
            raise OSError("Invalid chunk header") from e

        if size < 0:
            raise OSError("Negative chunk length not allowed")

        return size

    def readinto(self, buf: bytearray) -> int:  # type: ignore
        """Fill ``buf`` with dechunked payload bytes; return the count."""
        filled = 0

        while not self._done and filled < len(buf):
            if self._len == 0:
                # Start of stream, or the previous chunk was fully
                # consumed: read the next chunk-size line.
                self._len = self.read_chunk_len()

                if self._len == 0:
                    # The final 0-sized chunk: the stream is exhausted, but
                    # its trailing newline still has to be consumed below.
                    self._done = True

            if self._len > 0:
                # Copy payload, never reading past the caller's buffer.
                want = min(len(buf), self._len)

                if filled + want > len(buf):
                    take = len(buf) - filled
                    buf[filled:] = self._rfile.read(take)
                else:
                    take = want
                    buf[filled : filled + take] = self._rfile.read(take)

                self._len -= take
                filled += take

            if self._len == 0:
                # The chunk (including the final 0 chunk) is consumed:
                # swallow its terminating newline.
                terminator = self._rfile.readline()
                if terminator not in (b"\n", b"\r\n", b"\r"):
                    raise OSError("Missing chunk terminating newline")

        return filled
+
+
+class WSGIRequestHandler(BaseHTTPRequestHandler):
+ """A request handler that implements WSGI dispatching."""
+
    # Set by socketserver when the handler is constructed for a request.
    server: BaseWSGIServer

    @property
    def server_version(self) -> str:  # type: ignore
        # Overrides BaseHTTPRequestHandler.server_version with the string
        # precomputed on the server object.
        return self.server._server_version
+
    def make_environ(self) -> WSGIEnvironment:
        """Build the WSGI environ dict for the current request from the
        parsed request line, headers, and socket.
        """
        request_url = urlsplit(self.path)
        url_scheme = "http" if self.server.ssl_context is None else "https"

        # Normalize client_address: unix sockets may give "" or a string.
        if not self.client_address:
            self.client_address = ("<local>", 0)
        elif isinstance(self.client_address, str):
            self.client_address = (self.client_address, 0)

        # If there was no scheme but the path started with two slashes,
        # the first segment may have been incorrectly parsed as the
        # netloc, prepend it to the path again.
        if not request_url.scheme and request_url.netloc:
            path_info = f"/{request_url.netloc}{request_url.path}"
        else:
            path_info = request_url.path

        path_info = unquote(path_info)

        environ: WSGIEnvironment = {
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": url_scheme,
            "wsgi.input": self.rfile,
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": self.server.multithread,
            "wsgi.multiprocess": self.server.multiprocess,
            "wsgi.run_once": False,
            "werkzeug.socket": self.connection,
            "SERVER_SOFTWARE": self.server_version,
            "REQUEST_METHOD": self.command,
            "SCRIPT_NAME": "",
            "PATH_INFO": _wsgi_encoding_dance(path_info),
            "QUERY_STRING": _wsgi_encoding_dance(request_url.query),
            # Non-standard, added by mod_wsgi, uWSGI
            "REQUEST_URI": _wsgi_encoding_dance(self.path),
            # Non-standard, added by gunicorn
            "RAW_URI": _wsgi_encoding_dance(self.path),
            "REMOTE_ADDR": self.address_string(),
            "REMOTE_PORT": self.port_integer(),
            "SERVER_NAME": self.server.server_address[0],
            "SERVER_PORT": str(self.server.server_address[1]),
            "SERVER_PROTOCOL": self.request_version,
        }

        for key, value in self.headers.items():
            # Skip headers containing underscores: after the CGI name
            # mangling below they would collide with hyphenated headers.
            if "_" in key:
                continue

            key = key.upper().replace("-", "_")
            value = value.replace("\r\n", "")
            if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):
                key = f"HTTP_{key}"
            if key in environ:
                # Repeated headers are joined with commas.
                value = f"{environ[key]},{value}"
            environ[key] = value

        if environ.get("HTTP_TRANSFER_ENCODING", "").strip().lower() == "chunked":
            environ["wsgi.input_terminated"] = True
            environ["wsgi.input"] = DechunkedInput(environ["wsgi.input"])

        # Per RFC 2616, if the URL is absolute, use that as the host.
        # We're using "has a scheme" to indicate an absolute URL.
        if request_url.scheme and request_url.netloc:
            environ["HTTP_HOST"] = request_url.netloc

        try:
            # binary_form=False gives nicer information, but wouldn't be compatible with
            # what Nginx or Apache could return.
            peer_cert = self.connection.getpeercert(binary_form=True)
            if peer_cert is not None:
                # Nginx and Apache use PEM format.
                environ["SSL_CLIENT_CERT"] = ssl.DER_cert_to_PEM_cert(peer_cert)
        except ValueError:
            # SSL handshake hasn't finished.
            self.server.log("error", "Cannot fetch SSL peer certificate info")
        except AttributeError:
            # Not using TLS, the socket will not have getpeercert().
            pass

        return environ
+
    def run_wsgi(self) -> None:
        """Build the environ, run the WSGI application, and stream its
        response to the client, falling back to a 500 page on errors.
        """
        # Honor "Expect: 100-continue" before the app reads the body.
        if self.headers.get("Expect", "").lower().strip() == "100-continue":
            self.wfile.write(b"HTTP/1.1 100 Continue\r\n\r\n")

        self.environ = environ = self.make_environ()
        # *_set: values handed to start_response but not yet on the wire.
        # *_sent: values actually written; set on the first write().
        status_set: str | None = None
        headers_set: list[tuple[str, str]] | None = None
        status_sent: str | None = None
        headers_sent: list[tuple[str, str]] | None = None
        chunk_response: bool = False

        def write(data: bytes) -> None:
            nonlocal status_sent, headers_sent, chunk_response
            assert status_set is not None, "write() before start_response"
            assert headers_set is not None, "write() before start_response"
            # The status line and headers are flushed lazily, on the first
            # body write, so start_response may still replace them.
            if status_sent is None:
                status_sent = status_set
                headers_sent = headers_set
                try:
                    code_str, msg = status_sent.split(None, 1)
                except ValueError:
                    code_str, msg = status_sent, ""
                code = int(code_str)
                self.send_response(code, msg)
                header_keys = set()
                for key, value in headers_sent:
                    self.send_header(key, value)
                    header_keys.add(key.lower())

                # Use chunked transfer encoding if there is no content
                # length. Do not use for 1xx and 204 responses. 304
                # responses and HEAD requests are also excluded, which
                # is the more conservative behavior and matches other
                # parts of the code.
                # https://httpwg.org/specs/rfc7230.html#rfc.section.3.3.1
                if (
                    not (
                        "content-length" in header_keys
                        or environ["REQUEST_METHOD"] == "HEAD"
                        or (100 <= code < 200)
                        or code in {204, 304}
                    )
                    and self.protocol_version >= "HTTP/1.1"
                ):
                    chunk_response = True
                    self.send_header("Transfer-Encoding", "chunked")

                # Always close the connection. This disables HTTP/1.1
                # keep-alive connections. They aren't handled well by
                # Python's http.server because it doesn't know how to
                # drain the stream before the next request line.
                self.send_header("Connection", "close")
                self.end_headers()

            assert isinstance(data, bytes), "applications must write bytes"

            if data:
                if chunk_response:
                    # Each chunk is "<hex length>\r\n<data>\r\n".
                    self.wfile.write(hex(len(data))[2:].encode())
                    self.wfile.write(b"\r\n")

                self.wfile.write(data)

                if chunk_response:
                    self.wfile.write(b"\r\n")

            self.wfile.flush()

        def start_response(status, headers, exc_info=None):  # type: ignore
            nonlocal status_set, headers_set
            if exc_info:
                try:
                    # Headers already on the wire: re-raise; otherwise the
                    # app may replace status/headers below.
                    if headers_sent:
                        raise exc_info[1].with_traceback(exc_info[2])
                finally:
                    exc_info = None
            elif headers_set:
                raise AssertionError("Headers already set")
            status_set = status
            headers_set = headers
            return write

        def execute(app: WSGIApplication) -> None:
            application_iter = app(environ, start_response)
            try:
                for data in application_iter:
                    write(data)
                # An empty-body response still needs its headers flushed.
                if not headers_sent:
                    write(b"")
                if chunk_response:
                    # Terminating 0-sized chunk.
                    self.wfile.write(b"0\r\n\r\n")
            finally:
                # Check for any remaining data in the read socket, and discard it. This
                # will read past request.max_content_length, but lets the client see a
                # 413 response instead of a connection reset failure. If we supported
                # keep-alive connections, this naive approach would break by reading the
                # next request line. Since we know that write (above) closes every
                # connection we can read everything.
                selector = selectors.DefaultSelector()
                selector.register(self.connection, selectors.EVENT_READ)
                total_size = 0
                total_reads = 0

                # A timeout of 0 tends to fail because a client needs a small amount of
                # time to continue sending its data.
                while selector.select(timeout=0.01):
                    # Only read 10MB into memory at a time.
                    data = self.rfile.read(10_000_000)
                    total_size += len(data)
                    total_reads += 1

                    # Stop reading on no data, >=10GB, or 1000 reads. If a client sends
                    # more than that, they'll get a connection reset failure.
                    if not data or total_size >= 10_000_000_000 or total_reads > 1000:
                        break

                selector.close()

                if hasattr(application_iter, "close"):
                    application_iter.close()

        try:
            execute(self.server.app)
        except connection_dropped_errors as e:
            self.connection_dropped(e, environ)
        except Exception as e:
            if self.server.passthrough_errors:
                raise

            if status_sent is not None and chunk_response:
                self.close_connection = True

            try:
                # if we haven't yet sent the headers but they are set
                # we roll back to be able to set them again.
                if status_sent is None:
                    status_set = None
                    headers_set = None
                execute(InternalServerError())
            except Exception:
                pass

            from .debug.tbtools import DebugTraceback

            msg = DebugTraceback(e).render_traceback_text()
            self.server.log("error", f"Error on request:\n{msg}")
+
    def handle(self) -> None:
        """Handles a request ignoring dropped connections."""
        try:
            super().handle()
        except (ConnectionError, socket.timeout) as e:
            # The client went away mid-request; hand the error to the
            # connection_dropped hook instead of crashing the worker.
            self.connection_dropped(e)
        except Exception as e:
            if self.server.ssl_context is not None and is_ssl_error(e):
                # A misbehaving TLS client (bad handshake, plain HTTP to an
                # HTTPS port) is logged, not raised, so it cannot take the
                # server down.
                self.log_error("SSL error occurred: %s", e)
            else:
                raise
+
    def connection_dropped(
        self, error: BaseException, environ: WSGIEnvironment | None = None
    ) -> None:
        """Called if the connection was closed by the client. By default
        nothing happens.

        :param error: The exception that signalled the disconnect.
        :param environ: The WSGI environ of the request in progress, if it
            was already built when the connection dropped.
        """
+
+ def __getattr__(self, name: str) -> t.Any:
+ # All HTTP methods are handled by run_wsgi.
+ if name.startswith("do_"):
+ return self.run_wsgi
+
+ # All other attributes are forwarded to the base class.
+ return getattr(super(), name)
+
+ def address_string(self) -> str:
+ if getattr(self, "environ", None):
+ return self.environ["REMOTE_ADDR"] # type: ignore
+
+ if not self.client_address:
+ return "<local>"
+
+ return self.client_address[0]
+
+ def port_integer(self) -> int:
+ return self.client_address[1]
+
    # Escape control characters. This is defined (but private) in Python 3.12.
    # Maps C0 control bytes (0x00-0x1F) plus 0x7F-0x9F to literal \xNN escapes
    # so that a malicious request path cannot inject terminal escape
    # sequences into the server log.
    _control_char_table = str.maketrans(
        {c: rf"\x{c:02x}" for c in [*range(0x20), *range(0x7F, 0xA0)]}
    )
    # Backslash itself is escaped so the escaped output stays unambiguous.
    _control_char_table[ord("\\")] = r"\\"
+
    def log_request(self, code: int | str = "-", size: int | str = "-") -> None:
        """Log one request line, colorized by the response status code."""
        try:
            path = uri_to_iri(self.path)
            msg = f"{self.command} {path} {self.request_version}"
        except AttributeError:
            # path isn't set if the requestline was bad
            msg = self.requestline

        # Escape control characters that may be in the decoded path.
        msg = msg.translate(self._control_char_table)
        code = str(code)

        if code[0] == "1":  # 1xx - Informational
            msg = _ansi_style(msg, "bold")
        elif code == "200":  # 200 - OK, left unstyled
            pass
        elif code == "304":  # 304 - Resource Not Modified
            msg = _ansi_style(msg, "cyan")
        elif code[0] == "3":  # 3xx - Redirection
            msg = _ansi_style(msg, "green")
        elif code == "404":  # 404 - Resource Not Found
            msg = _ansi_style(msg, "yellow")
        elif code[0] == "4":  # 4xx - Client Error
            msg = _ansi_style(msg, "bold", "red")
        else:  # 5xx — and any 2xx other than 200 falls through to here
            msg = _ansi_style(msg, "bold", "magenta")

        self.log("info", '"%s" %s %s', msg, code, size)
+
+ def log_error(self, format: str, *args: t.Any) -> None:
+ self.log("error", format, *args)
+
+ def log_message(self, format: str, *args: t.Any) -> None:
+ self.log("info", format, *args)
+
+ def log(self, type: str, message: str, *args: t.Any) -> None:
+ # an IPv6 scoped address contains "%" which breaks logging
+ address_string = self.address_string().replace("%", "%%")
+ _log(
+ type,
+ f"{address_string} - - [{self.log_date_time_string()}] {message}\n",
+ *args,
+ )
+
+
def _ansi_style(value: str, *styles: str) -> str:
    """Wrap ``value`` in ANSI escape sequences for the given style names.

    When styled output is disabled for this process (``_log_add_style`` is
    false) the text is returned unchanged. The reset sequence is always
    appended last.
    """
    if not _log_add_style:
        return value

    codes = {
        "bold": 1,
        "red": 31,
        "green": 32,
        "yellow": 33,
        "magenta": 35,
        "cyan": 36,
    }

    # Prepending one escape per style (last style outermost) matches the
    # original accumulation order exactly.
    prefix = "".join(f"\x1b[{codes[name]}m" for name in reversed(styles))
    return f"{prefix}{value}\x1b[0m"
+
+
def generate_adhoc_ssl_pair(
    cn: str | None = None,
) -> tuple[Certificate, RSAPrivateKeyWithSerialization]:
    """Generate a throwaway self-signed certificate and RSA private key.

    The certificate is valid for one year and is only meant to make TLS
    work during development; no client will trust it.

    :param cn: Common name for the certificate subject. Defaults to ``"*"``.
    :return: ``(certificate, private_key)`` as ``cryptography`` objects.
    :raises TypeError: If the ``cryptography`` package is not installed.
    """
    try:
        from cryptography import x509
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives import hashes
        from cryptography.hazmat.primitives.asymmetric import rsa
        from cryptography.x509.oid import NameOID
    except ImportError:
        raise TypeError(
            "Using ad-hoc certificates requires the cryptography library."
        ) from None

    # One backend instance is enough for both key generation and signing;
    # the original assigned it a second time for no effect.
    backend = default_backend()
    pkey = rsa.generate_private_key(
        public_exponent=65537, key_size=2048, backend=backend
    )

    # pretty damn sure that this is not actually accepted by anyone
    if cn is None:
        cn = "*"

    subject = x509.Name(
        [
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Dummy Certificate"),
            x509.NameAttribute(NameOID.COMMON_NAME, cn),
        ]
    )

    cert = (
        x509.CertificateBuilder()
        .subject_name(subject)
        .issuer_name(subject)  # self-signed: issuer == subject
        .public_key(pkey.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(dt.now(timezone.utc))
        .not_valid_after(dt.now(timezone.utc) + timedelta(days=365))
        .add_extension(x509.ExtendedKeyUsage([x509.OID_SERVER_AUTH]), critical=False)
        .add_extension(
            x509.SubjectAlternativeName([x509.DNSName(cn), x509.DNSName(f"*.{cn}")]),
            critical=False,
        )
        .sign(pkey, hashes.SHA256(), backend)
    )
    return cert, pkey
+
+
def make_ssl_devcert(
    base_path: str, host: str | None = None, cn: str | None = None
) -> tuple[str, str]:
    """Create and persist a development TLS certificate/key pair on disk.

    Unlike the ``'adhoc'`` ssl context, which regenerates a certificate on
    every server start, this stores the pair so it can be reused. If a
    ``host`` is given it takes precedence over ``cn`` and is used as the
    certificate's common name (``*.host/CN=host``).

    For more information see :func:`run_simple`.

    .. versionadded:: 0.9

    :param base_path: path prefix for the generated files; the ``.crt``
        extension is appended for the certificate and ``.key`` for the key.
    :param host: the name of the host, used as an alternative to ``cn``.
    :param cn: the ``CN`` to use.
    :return: ``(cert_file, pkey_file)`` paths.
    """
    if host is not None:
        cn = host

    cert, pkey = generate_adhoc_ssl_pair(cn=cn)

    from cryptography.hazmat.primitives import serialization

    cert_file = f"{base_path}.crt"
    pkey_file = f"{base_path}.key"

    # Serialize both objects first, then write each file in one shot.
    cert_bytes = cert.public_bytes(serialization.Encoding.PEM)
    pkey_bytes = pkey.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )

    with open(cert_file, "wb") as f:
        f.write(cert_bytes)

    with open(pkey_file, "wb") as f:
        f.write(pkey_bytes)

    return cert_file, pkey_file
+
+
def generate_adhoc_ssl_context() -> ssl.SSLContext:
    """Generates an adhoc SSL context for the development server."""
    import atexit
    import tempfile

    cert, pkey = generate_adhoc_ssl_pair()

    from cryptography.hazmat.primitives import serialization

    # The stdlib ssl module loads certificates from files, so write the
    # generated pair to temp files and remove them at interpreter exit.
    cert_handle, cert_file = tempfile.mkstemp()
    pkey_handle, pkey_file = tempfile.mkstemp()
    atexit.register(os.remove, pkey_file)
    atexit.register(os.remove, cert_file)

    os.write(cert_handle, cert.public_bytes(serialization.Encoding.PEM))
    os.write(
        pkey_handle,
        pkey.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption(),
        ),
    )

    # Close the raw descriptors before loading so all bytes hit the files.
    os.close(cert_handle)
    os.close(pkey_handle)
    ctx = load_ssl_context(cert_file, pkey_file)
    return ctx
+
+
def load_ssl_context(
    cert_file: str, pkey_file: str | None = None, protocol: int | None = None
) -> ssl.SSLContext:
    """Build an :class:`ssl.SSLContext` from certificate and key files.

    :param cert_file: Path of the certificate to use.
    :param pkey_file: Path of the private key to use. If not given, the key
        will be obtained from the certificate file.
    :param protocol: A ``PROTOCOL`` constant from the :mod:`ssl` module.
        Defaults to :data:`ssl.PROTOCOL_TLS_SERVER`.
    """
    chosen = ssl.PROTOCOL_TLS_SERVER if protocol is None else protocol
    ctx = ssl.SSLContext(chosen)
    ctx.load_cert_chain(cert_file, pkey_file)
    return ctx
+
+
def is_ssl_error(error: Exception | None = None) -> bool:
    """Checks if the given error (or the current one) is an SSL error."""
    # Fall back to the exception currently being handled, if any.
    target = error if error is not None else t.cast(Exception, sys.exc_info()[1])
    return isinstance(target, ssl.SSLError)
+
+
def select_address_family(host: str, port: int) -> socket.AddressFamily:
    """Pick the socket address family implied by ``host``.

    ``unix://`` prefixes select ``AF_UNIX``; a host containing ``:`` is
    treated as IPv6 (when the platform supports it); anything else is plain
    IPv4. ``port`` is accepted for signature compatibility but not used.
    """
    if host.startswith("unix://"):
        return socket.AF_UNIX

    if ":" in host and hasattr(socket, "AF_INET6"):
        return socket.AF_INET6

    return socket.AF_INET
+
+
def get_sockaddr(
    host: str, port: int, family: socket.AddressFamily
) -> tuple[str, int] | str:
    """Return a fully qualified socket address that can be passed to
    :func:`socket.bind`.

    For unix sockets this is the filesystem path; otherwise it is the
    resolved ``(host, port)`` pair.
    """
    if family == af_unix:
        # Absolute path avoids IDNA encoding error when path starts with dot.
        return os.path.abspath(host.partition("://")[2])

    try:
        infos = socket.getaddrinfo(
            host, port, family, socket.SOCK_STREAM, socket.IPPROTO_TCP
        )
    except socket.gaierror:
        # Resolution failed; hand back the raw pair and let bind() complain.
        return host, port

    first = infos[0]
    return first[4]  # type: ignore
+
+
def get_interface_ip(family: socket.AddressFamily) -> str:
    """Get the IP address of an external interface. Used when binding to
    0.0.0.0 or ::1 to show a more useful URL.

    :meta private:
    """
    # Connecting a UDP socket sends no packets; it only asks the OS which
    # local interface would route toward this arbitrary private address.
    probe = "fd31:f903:5ab5:1::1" if family == socket.AF_INET6 else "10.253.155.219"
    loopback = "::1" if family == socket.AF_INET6 else "127.0.0.1"

    with socket.socket(family, socket.SOCK_DGRAM) as s:
        try:
            s.connect((probe, 58162))
        except OSError:
            return loopback

        return s.getsockname()[0]  # type: ignore
+
+
class BaseWSGIServer(HTTPServer):
    """A WSGI server that handles one request at a time.

    Use :func:`make_server` to create a server instance.
    """

    multithread = False
    multiprocess = False
    # Backlog passed to socket.listen(); LISTEN_QUEUE is a module constant.
    request_queue_size = LISTEN_QUEUE
    allow_reuse_address = True

    def __init__(
        self,
        host: str,
        port: int,
        app: WSGIApplication,
        handler: type[WSGIRequestHandler] | None = None,
        passthrough_errors: bool = False,
        ssl_context: _TSSLContextArg | None = None,
        fd: int | None = None,
    ) -> None:
        if handler is None:
            handler = WSGIRequestHandler

        # If the handler doesn't directly set a protocol version and
        # thread or process workers are used, then allow chunked
        # responses and keep-alive connections by enabling HTTP/1.1.
        if "protocol_version" not in vars(handler) and (
            self.multithread or self.multiprocess
        ):
            handler.protocol_version = "HTTP/1.1"

        self.host = host
        self.port = port
        self.app = app
        self.passthrough_errors = passthrough_errors

        self.address_family = address_family = select_address_family(host, port)
        server_address = get_sockaddr(host, int(port), address_family)

        # Remove a leftover Unix socket file from a previous run. Don't
        # remove a file that was set up by run_simple.
        if address_family == af_unix and fd is None:
            server_address = t.cast(str, server_address)

            if os.path.exists(server_address):
                os.unlink(server_address)

        # Bind and activate will be handled manually, it should only
        # happen if we're not using a socket that was already set up.
        super().__init__(
            server_address,  # type: ignore[arg-type]
            handler,
            bind_and_activate=False,
        )

        if fd is None:
            # No existing socket descriptor, do bind_and_activate=True.
            try:
                self.server_bind()
                self.server_activate()
            except OSError as e:
                # Catch connection issues and show them without the traceback. Show
                # extra instructions for address not found, and for macOS.
                self.server_close()
                print(e.strerror, file=sys.stderr)

                if e.errno == errno.EADDRINUSE:
                    print(
                        f"Port {port} is in use by another program. Either identify and"
                        " stop that program, or start the server with a different"
                        " port.",
                        file=sys.stderr,
                    )

                if sys.platform == "darwin" and port == 5000:
                    print(
                        "On macOS, try disabling the 'AirPlay Receiver' service"
                        " from System Preferences -> General -> AirDrop & Handoff.",
                        file=sys.stderr,
                    )

                sys.exit(1)
            except BaseException:
                # Any other startup failure: release the socket, then re-raise.
                self.server_close()
                raise
        else:
            # TCPServer automatically opens a socket even if bind_and_activate is False.
            # Close it to silence a ResourceWarning.
            self.server_close()

            # Use the passed in socket directly.
            self.socket = socket.fromfd(fd, address_family, socket.SOCK_STREAM)
            self.server_address = self.socket.getsockname()

        if address_family != af_unix:
            # If port was 0, this will record the bound port.
            self.port = self.server_address[1]

        if ssl_context is not None:
            # Accept a ready context, a (cert, key) tuple, or the string
            # "adhoc" for a freshly generated self-signed certificate.
            if isinstance(ssl_context, tuple):
                ssl_context = load_ssl_context(*ssl_context)
            elif ssl_context == "adhoc":
                ssl_context = generate_adhoc_ssl_context()

            self.socket = ssl_context.wrap_socket(self.socket, server_side=True)
            self.ssl_context: ssl.SSLContext | None = ssl_context
        else:
            self.ssl_context = None

        import importlib.metadata

        # Advertised in the Server response header.
        self._server_version = f"Werkzeug/{importlib.metadata.version('werkzeug')}"

    def log(self, type: str, message: str, *args: t.Any) -> None:
        # Server-level log lines go straight to the module logger.
        _log(type, message, *args)

    def serve_forever(self, poll_interval: float = 0.5) -> None:
        # Ctrl+C stops the loop quietly; the socket is always released.
        try:
            super().serve_forever(poll_interval=poll_interval)
        except KeyboardInterrupt:
            pass
        finally:
            self.server_close()

    def handle_error(
        self, request: t.Any, client_address: tuple[str, int] | str
    ) -> None:
        # Called by socketserver from inside an except block, so a bare
        # raise re-raises the active exception when passthrough is on.
        if self.passthrough_errors:
            raise

        return super().handle_error(request, client_address)

    def log_startup(self) -> None:
        """Show information about the address when starting the server."""
        dev_warning = (
            "WARNING: This is a development server. Do not use it in a production"
            " deployment. Use a production WSGI server instead."
        )
        dev_warning = _ansi_style(dev_warning, "bold", "red")
        messages = [dev_warning]

        if self.address_family == af_unix:
            messages.append(f" * Running on {self.host}")
        else:
            scheme = "http" if self.ssl_context is None else "https"
            display_hostname = self.host

            if self.host in {"0.0.0.0", "::"}:
                # Bound to all interfaces: also show the loopback URL and a
                # concrete interface IP instead of the wildcard address.
                messages.append(f" * Running on all addresses ({self.host})")

                if self.host == "0.0.0.0":
                    localhost = "127.0.0.1"
                    display_hostname = get_interface_ip(socket.AF_INET)
                else:
                    localhost = "[::1]"
                    display_hostname = get_interface_ip(socket.AF_INET6)

                messages.append(f" * Running on {scheme}://{localhost}:{self.port}")

            if ":" in display_hostname:
                # Bracket bare IPv6 addresses for a valid URL.
                display_hostname = f"[{display_hostname}]"

            messages.append(f" * Running on {scheme}://{display_hostname}:{self.port}")

        _log("info", "\n".join(messages))
+
+
class ThreadedWSGIServer(socketserver.ThreadingMixIn, BaseWSGIServer):
    """A WSGI server that handles concurrent requests in separate
    threads.

    Use :func:`make_server` to create a server instance.
    """

    multithread = True
    # Worker threads die with the main thread so shutdown is not blocked.
    daemon_threads = True
+
+
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
    """A WSGI server that handles concurrent requests in separate forked
    processes.

    Use :func:`make_server` to create a server instance.
    """

    multiprocess = True

    def __init__(
        self,
        host: str,
        port: int,
        app: WSGIApplication,
        processes: int = 40,
        handler: type[WSGIRequestHandler] | None = None,
        passthrough_errors: bool = False,
        ssl_context: _TSSLContextArg | None = None,
        fd: int | None = None,
    ) -> None:
        """Set up the forking server.

        :param processes: maximum number of concurrent worker processes.
        :raises ValueError: if the platform cannot ``fork``.
        """
        if not can_fork:
            raise ValueError("Your platform does not support forking.")

        super().__init__(host, port, app, handler, passthrough_errors, ssl_context, fd)
        # ForkingMixIn reads max_children to cap concurrent child processes.
        self.max_children = processes
+
+
def make_server(
    host: str,
    port: int,
    app: WSGIApplication,
    threaded: bool = False,
    processes: int = 1,
    request_handler: type[WSGIRequestHandler] | None = None,
    passthrough_errors: bool = False,
    ssl_context: _TSSLContextArg | None = None,
    fd: int | None = None,
) -> BaseWSGIServer:
    """Create an appropriate WSGI server instance based on the value of
    ``threaded`` and ``processes``.

    This is called from :func:`run_simple`, but can be used separately
    to have access to the server object, such as to run it in a separate
    thread.

    See :func:`run_simple` for parameter docs.
    """
    if threaded and processes > 1:
        raise ValueError("Cannot have a multi-thread and multi-process server.")

    # The trailing constructor arguments are shared by all three variants.
    common = (request_handler, passthrough_errors, ssl_context)

    if threaded:
        return ThreadedWSGIServer(host, port, app, *common, fd=fd)

    if processes > 1:
        return ForkingWSGIServer(host, port, app, processes, *common, fd=fd)

    return BaseWSGIServer(host, port, app, *common, fd=fd)
+
+
def is_running_from_reloader() -> bool:
    """Check if the server is running as a subprocess within the
    Werkzeug reloader.

    .. versionadded:: 0.10
    """
    # The reloader parent sets this flag in the child's environment.
    flag = os.environ.get("WERKZEUG_RUN_MAIN")
    return flag == "true"
+
+
def run_simple(
    hostname: str,
    port: int,
    application: WSGIApplication,
    use_reloader: bool = False,
    use_debugger: bool = False,
    use_evalex: bool = True,
    extra_files: t.Iterable[str] | None = None,
    exclude_patterns: t.Iterable[str] | None = None,
    reloader_interval: int = 1,
    reloader_type: str = "auto",
    threaded: bool = False,
    processes: int = 1,
    request_handler: type[WSGIRequestHandler] | None = None,
    static_files: dict[str, str | tuple[str, str]] | None = None,
    passthrough_errors: bool = False,
    ssl_context: _TSSLContextArg | None = None,
) -> None:
    """Start a development server for a WSGI application. Various
    optional features can be enabled.

    .. warning::

        Do not use the development server when deploying to production.
        It is intended for use only during local development. It is not
        designed to be particularly efficient, stable, or secure.

    :param hostname: The host to bind to, for example ``'localhost'``.
        Can be a domain, IPv4 or IPv6 address, or file path starting
        with ``unix://`` for a Unix socket.
    :param port: The port to bind to, for example ``8080``. Using ``0``
        tells the OS to pick a random free port.
    :param application: The WSGI application to run.
    :param use_reloader: Use a reloader process to restart the server
        process when files are changed.
    :param use_debugger: Use Werkzeug's debugger, which will show
        formatted tracebacks on unhandled exceptions.
    :param use_evalex: Make the debugger interactive. A Python terminal
        can be opened for any frame in the traceback. Some protection is
        provided by requiring a PIN, but this should never be enabled
        on a publicly visible server.
    :param extra_files: The reloader will watch these files for changes
        in addition to Python modules. For example, watch a
        configuration file.
    :param exclude_patterns: The reloader will ignore changes to any
        files matching these :mod:`fnmatch` patterns. For example,
        ignore cache files.
    :param reloader_interval: How often the reloader tries to check for
        changes.
    :param reloader_type: The reloader to use. The ``'stat'`` reloader
        is built in, but may require significant CPU to watch files. The
        ``'watchdog'`` reloader is much more efficient but requires
        installing the ``watchdog`` package first.
    :param threaded: Handle concurrent requests using threads. Cannot be
        used with ``processes``.
    :param processes: Handle concurrent requests using up to this number
        of processes. Cannot be used with ``threaded``.
    :param request_handler: Use a different
        :class:`~BaseHTTPServer.BaseHTTPRequestHandler` subclass to
        handle requests.
    :param static_files: A dict mapping URL prefixes to directories to
        serve static files from using
        :class:`~werkzeug.middleware.SharedDataMiddleware`.
    :param passthrough_errors: Don't catch unhandled exceptions at the
        server level, let the server crash instead. If ``use_debugger``
        is enabled, the debugger will still catch such errors.
    :param ssl_context: Configure TLS to serve over HTTPS. Can be an
        :class:`ssl.SSLContext` object, a ``(cert_file, key_file)``
        tuple to create a typical context, or the string ``'adhoc'`` to
        generate a temporary self-signed certificate.

    .. versionchanged:: 2.1
        Instructions are shown for dealing with an "address already in
        use" error.

    .. versionchanged:: 2.1
        Running on ``0.0.0.0`` or ``::`` shows the loopback IP in
        addition to a real IP.

    .. versionchanged:: 2.1
        The command-line interface was removed.

    .. versionchanged:: 2.0
        Running on ``0.0.0.0`` or ``::`` shows a real IP address that
        was bound as well as a warning not to run the development server
        in production.

    .. versionchanged:: 2.0
        The ``exclude_patterns`` parameter was added.

    .. versionchanged:: 0.15
        Bind to a Unix socket by passing a ``hostname`` that starts with
        ``unix://``.

    .. versionchanged:: 0.10
        Improved the reloader and added support for changing the backend
        through the ``reloader_type`` parameter.

    .. versionchanged:: 0.9
        A command-line interface was added.

    .. versionchanged:: 0.8
        ``ssl_context`` can be a tuple of paths to the certificate and
        private key files.

    .. versionchanged:: 0.6
        The ``ssl_context`` parameter was added.

    .. versionchanged:: 0.5
        The ``static_files`` and ``passthrough_errors`` parameters were
        added.
    """
    if not isinstance(port, int):
        raise TypeError("port must be an integer")

    # Wrap the application in the optional middlewares before the server
    # is created, so the server only ever sees the outermost callable.
    if static_files:
        from .middleware.shared_data import SharedDataMiddleware

        application = SharedDataMiddleware(application, static_files)

    if use_debugger:
        from .debug import DebuggedApplication

        application = DebuggedApplication(application, evalex=use_evalex)
        # Allow the specified hostname to use the debugger, in addition to
        # localhost domains.
        application.trusted_hosts.append(hostname)

    # When restarted by the reloader, reuse the already-bound listening
    # socket via its inherited file descriptor instead of binding again.
    if not is_running_from_reloader():
        fd = None
    else:
        fd = int(os.environ["WERKZEUG_SERVER_FD"])

    srv = make_server(
        hostname,
        port,
        application,
        threaded,
        processes,
        request_handler,
        passthrough_errors,
        ssl_context,
        fd=fd,
    )
    # Publish the socket fd so reloader-spawned children can pick it up.
    srv.socket.set_inheritable(True)
    os.environ["WERKZEUG_SERVER_FD"] = str(srv.fileno())

    if not is_running_from_reloader():
        srv.log_startup()
        _log("info", _ansi_style("Press CTRL+C to quit", "yellow"))

    if use_reloader:
        from ._reloader import run_with_reloader

        try:
            run_with_reloader(
                srv.serve_forever,
                extra_files=extra_files,
                exclude_patterns=exclude_patterns,
                interval=reloader_interval,
                reloader_type=reloader_type,
            )
        finally:
            srv.server_close()
    else:
        srv.serve_forever()
diff --git a/venv/lib/python3.8/site-packages/werkzeug/test.py b/venv/lib/python3.8/site-packages/werkzeug/test.py
new file mode 100644
index 0000000..38f69bf
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/test.py
@@ -0,0 +1,1464 @@
+from __future__ import annotations
+
+import dataclasses
+import mimetypes
+import sys
+import typing as t
+from collections import defaultdict
+from datetime import datetime
+from io import BytesIO
+from itertools import chain
+from random import random
+from tempfile import TemporaryFile
+from time import time
+from urllib.parse import unquote
+from urllib.parse import urlsplit
+from urllib.parse import urlunsplit
+
+from ._internal import _get_environ
+from ._internal import _wsgi_decoding_dance
+from ._internal import _wsgi_encoding_dance
+from .datastructures import Authorization
+from .datastructures import CallbackDict
+from .datastructures import CombinedMultiDict
+from .datastructures import EnvironHeaders
+from .datastructures import FileMultiDict
+from .datastructures import Headers
+from .datastructures import MultiDict
+from .http import dump_cookie
+from .http import dump_options_header
+from .http import parse_cookie
+from .http import parse_date
+from .http import parse_options_header
+from .sansio.multipart import Data
+from .sansio.multipart import Epilogue
+from .sansio.multipart import Field
+from .sansio.multipart import File
+from .sansio.multipart import MultipartEncoder
+from .sansio.multipart import Preamble
+from .urls import _urlencode
+from .urls import iri_to_uri
+from .utils import cached_property
+from .utils import get_content_type
+from .wrappers.request import Request
+from .wrappers.response import Response
+from .wsgi import ClosingIterator
+from .wsgi import get_current_url
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+
def stream_encode_multipart(
    data: t.Mapping[str, t.Any],
    use_tempfile: bool = True,
    threshold: int = 1024 * 500,
    boundary: str | None = None,
) -> tuple[t.IO[bytes], int, str]:
    """Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects.) into a multipart encoded string stored
    in a file descriptor.

    Returns ``(stream, length, boundary)`` with the stream rewound to the
    beginning.

    .. versionchanged:: 3.0
        The ``charset`` parameter was removed.
    """
    if boundary is None:
        # time() + random() makes the boundary unlikely to occur in the body.
        boundary = f"---------------WerkzeugFormPart_{time()}{random()}"

    stream: t.IO[bytes] = BytesIO()
    total_length = 0
    on_disk = False
    write_binary: t.Callable[[bytes], int]

    if use_tempfile:

        def write_binary(s: bytes) -> int:
            # Buffer in memory until `threshold` bytes, then spill the whole
            # payload to a temporary file and keep writing there.
            nonlocal stream, total_length, on_disk

            if on_disk:
                return stream.write(s)
            else:
                length = len(s)

                if length + total_length <= threshold:
                    stream.write(s)
                else:
                    new_stream = t.cast(t.IO[bytes], TemporaryFile("wb+"))
                    new_stream.write(stream.getvalue())  # type: ignore
                    new_stream.write(s)
                    stream = new_stream
                    on_disk = True

                total_length += length
                return length

    else:
        write_binary = stream.write

    encoder = MultipartEncoder(boundary.encode())
    write_binary(encoder.send_event(Preamble(data=b"")))
    for key, value in _iter_data(data):
        reader = getattr(value, "read", None)
        if reader is not None:
            # File-like value: emit a File part (or a Field when no filename
            # is available) and stream its contents in 16 KiB chunks.
            filename = getattr(value, "filename", getattr(value, "name", None))
            content_type = getattr(value, "content_type", None)
            if content_type is None:
                content_type = (
                    filename
                    and mimetypes.guess_type(filename)[0]
                    or "application/octet-stream"
                )
            headers = value.headers
            headers.update([("Content-Type", content_type)])
            if filename is None:
                write_binary(encoder.send_event(Field(name=key, headers=headers)))
            else:
                write_binary(
                    encoder.send_event(
                        File(name=key, filename=filename, headers=headers)
                    )
                )
            while True:
                chunk = reader(16384)

                if not chunk:
                    # Empty chunk terminates the part.
                    write_binary(encoder.send_event(Data(data=chunk, more_data=False)))
                    break

                write_binary(encoder.send_event(Data(data=chunk, more_data=True)))
        else:
            # Plain value: coerce to str and emit as a simple form field.
            if not isinstance(value, str):
                value = str(value)
            write_binary(encoder.send_event(Field(name=key, headers=Headers())))
            write_binary(encoder.send_event(Data(data=value.encode(), more_data=False)))

    write_binary(encoder.send_event(Epilogue(data=b"")))

    length = stream.tell()
    stream.seek(0)
    return stream, length, boundary
+
+
def encode_multipart(
    values: t.Mapping[str, t.Any], boundary: str | None = None
) -> tuple[str, bytes]:
    """Like `stream_encode_multipart` but returns a tuple in the form
    (``boundary``, ``data``) where data is bytes.

    .. versionchanged:: 3.0
        The ``charset`` parameter was removed.
    """
    # Force in-memory encoding, then drain the stream into a bytes object.
    stream, _length, boundary = stream_encode_multipart(
        values, use_tempfile=False, boundary=boundary
    )
    return boundary, stream.read()
+
+
def _iter_data(data: t.Mapping[str, t.Any]) -> t.Iterator[tuple[str, t.Any]]:
    """Yield ``(key, value)`` pairs from *data*, expanding plain ``list``
    values into one pair per item. Similar to ``iter_multi_items``, except
    tuples are passed through whole so they can carry file specifications.
    """
    if isinstance(data, MultiDict):
        yield from data.items(multi=True)
        return

    for key, value in data.items():
        # Only lists fan out; any other value (including tuples) is yielded
        # as a single pair.
        if isinstance(value, list):
            yield from ((key, item) for item in value)
        else:
            yield key, value
+
+
# TypeVar bound to MultiDict so helpers can accept and return the same
# MultiDict subclass they were given.
_TAnyMultiDict = t.TypeVar("_TAnyMultiDict", bound="MultiDict[t.Any, t.Any]")
+
+
+class EnvironBuilder:
+ """This class can be used to conveniently create a WSGI environment
+ for testing purposes. It can be used to quickly create WSGI environments
+ or request objects from arbitrary data.
+
+ The signature of this class is also used in some other places as of
+ Werkzeug 0.5 (:func:`create_environ`, :meth:`Response.from_values`,
+ :meth:`Client.open`). Because of this most of the functionality is
+ available through the constructor alone.
+
+ Files and regular form data can be manipulated independently of each
+ other with the :attr:`form` and :attr:`files` attributes, but are
+ passed with the same argument to the constructor: `data`.
+
+ `data` can be any of these values:
+
+ - a `str` or `bytes` object: The object is converted into an
+ :attr:`input_stream`, the :attr:`content_length` is set and you have to
+ provide a :attr:`content_type`.
+ - a `dict` or :class:`MultiDict`: The keys have to be strings. The values
+ have to be either any of the following objects, or a list of any of the
+ following objects:
+
+ - a :class:`file`-like object: These are converted into
+ :class:`FileStorage` objects automatically.
+ - a `tuple`: The :meth:`~FileMultiDict.add_file` method is called
+ with the key and the unpacked `tuple` items as positional
+ arguments.
+ - a `str`: The string is set as form data for the associated key.
+ - a file-like object: The object content is loaded in memory and then
+ handled like a regular `str` or a `bytes`.
+
+ :param path: the path of the request. In the WSGI environment this will
+ end up as `PATH_INFO`. If the `query_string` is not defined
+ and there is a question mark in the `path` everything after
+ it is used as query string.
+ :param base_url: the base URL is a URL that is used to extract the WSGI
+ URL scheme, host (server name + server port) and the
+ script root (`SCRIPT_NAME`).
+ :param query_string: an optional string or dict with URL parameters.
+ :param method: the HTTP method to use, defaults to `GET`.
+ :param input_stream: an optional input stream. Do not specify this and
+ `data`. As soon as an input stream is set you can't
+ modify :attr:`args` and :attr:`files` unless you
+ set the :attr:`input_stream` to `None` again.
+ :param content_type: The content type for the request. As of 0.5 you
+ don't have to provide this when specifying files
+ and form data via `data`.
+ :param content_length: The content length for the request. You don't
+ have to specify this when providing data via
+ `data`.
+ :param errors_stream: an optional error stream that is used for
+ `wsgi.errors`. Defaults to :data:`stderr`.
+ :param multithread: controls `wsgi.multithread`. Defaults to `False`.
+ :param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
+ :param run_once: controls `wsgi.run_once`. Defaults to `False`.
+ :param headers: an optional list or :class:`Headers` object of headers.
+ :param data: a string or dict of form data or a file-object.
+ See explanation above.
+ :param json: An object to be serialized and assigned to ``data``.
+ Defaults the content type to ``"application/json"``.
+ Serialized with the function assigned to :attr:`json_dumps`.
+ :param environ_base: an optional dict of environment defaults.
+ :param environ_overrides: an optional dict of environment overrides.
+ :param auth: An authorization object to use for the
+ ``Authorization`` header value. A ``(username, password)`` tuple
+ is a shortcut for ``Basic`` authorization.
+
+ .. versionchanged:: 3.0
+ The ``charset`` parameter was removed.
+
+ .. versionchanged:: 2.1
+ ``CONTENT_TYPE`` and ``CONTENT_LENGTH`` are not duplicated as
+ header keys in the environ.
+
+ .. versionchanged:: 2.0
+ ``REQUEST_URI`` and ``RAW_URI`` is the full raw URI including
+ the query string, not only the path.
+
+ .. versionchanged:: 2.0
+ The default :attr:`request_class` is ``Request`` instead of
+ ``BaseRequest``.
+
+ .. versionadded:: 2.0
+ Added the ``auth`` parameter.
+
+ .. versionadded:: 0.15
+ The ``json`` param and :meth:`json_dumps` method.
+
+ .. versionadded:: 0.15
+ The environ has keys ``REQUEST_URI`` and ``RAW_URI`` containing
+ the path before percent-decoding. This is not part of the WSGI
+ PEP, but many WSGI servers include it.
+
+ .. versionchanged:: 0.6
+ ``path`` and ``base_url`` can now be unicode strings that are
+ encoded with :func:`iri_to_uri`.
+ """
+
+ #: the server protocol to use. defaults to HTTP/1.1
+ server_protocol = "HTTP/1.1"
+
+ #: the wsgi version to use. defaults to (1, 0)
+ wsgi_version = (1, 0)
+
+ #: The default request class used by :meth:`get_request`.
+ request_class = Request
+
+ import json
+
+ #: The serialization function used when ``json`` is passed.
+ json_dumps = staticmethod(json.dumps)
+ del json
+
+ _args: MultiDict[str, str] | None
+ _query_string: str | None
+ _input_stream: t.IO[bytes] | None
+ _form: MultiDict[str, str] | None
+ _files: FileMultiDict | None
+
    def __init__(
        self,
        path: str = "/",
        base_url: str | None = None,
        query_string: t.Mapping[str, str] | str | None = None,
        method: str = "GET",
        input_stream: t.IO[bytes] | None = None,
        content_type: str | None = None,
        content_length: int | None = None,
        errors_stream: t.IO[str] | None = None,
        multithread: bool = False,
        multiprocess: bool = False,
        run_once: bool = False,
        headers: Headers | t.Iterable[tuple[str, str]] | None = None,
        data: None | (t.IO[bytes] | str | bytes | t.Mapping[str, t.Any]) = None,
        environ_base: t.Mapping[str, t.Any] | None = None,
        environ_overrides: t.Mapping[str, t.Any] | None = None,
        mimetype: str | None = None,
        json: t.Mapping[str, t.Any] | None = None,
        auth: Authorization | tuple[str, str] | None = None,
    ) -> None:
        # A query string may be embedded in ``path`` or passed separately,
        # but not both at once.
        if query_string is not None and "?" in path:
            raise ValueError("Query string is defined in the path and as an argument")
        request_uri = urlsplit(path)
        if query_string is None and "?" in path:
            query_string = request_uri.query

        self.path = iri_to_uri(request_uri.path)
        # Keep the original raw URI for REQUEST_URI / RAW_URI in the environ.
        self.request_uri = path
        if base_url is not None:
            base_url = iri_to_uri(base_url)
        self.base_url = base_url  # type: ignore
        # A string goes through the ``query_string`` property; a mapping is
        # normalized to a MultiDict and stored via ``args``.
        if isinstance(query_string, str):
            self.query_string = query_string
        else:
            if query_string is None:
                query_string = MultiDict()
            elif not isinstance(query_string, MultiDict):
                query_string = MultiDict(query_string)
            self.args = query_string
        self.method = method
        if headers is None:
            headers = Headers()
        elif not isinstance(headers, Headers):
            headers = Headers(headers)
        self.headers = headers
        if content_type is not None:
            self.content_type = content_type
        if errors_stream is None:
            errors_stream = sys.stderr
        self.errors_stream = errors_stream
        self.multithread = multithread
        self.multiprocess = multiprocess
        self.run_once = run_once
        self.environ_base = environ_base
        self.environ_overrides = environ_overrides
        self.input_stream = input_stream
        self.content_length = content_length
        self.closed = False

        if auth is not None:
            # A (username, password) tuple is shorthand for Basic auth.
            if isinstance(auth, tuple):
                auth = Authorization(
                    "basic", {"username": auth[0], "password": auth[1]}
                )

            self.headers.set("Authorization", auth.to_header())

        if json is not None:
            if data is not None:
                raise TypeError("can't provide both json and data")

            # Serialize with the class-configurable ``json_dumps`` and only
            # default the content type if none was set explicitly.
            data = self.json_dumps(json)

            if self.content_type is None:
                self.content_type = "application/json"

        if data:
            if input_stream is not None:
                raise TypeError("can't provide input stream and data")
            if hasattr(data, "read"):
                data = data.read()
            if isinstance(data, str):
                data = data.encode()
            if isinstance(data, bytes):
                self.input_stream = BytesIO(data)
                if self.content_length is None:
                    self.content_length = len(data)
            else:
                # A mapping: tuples, dicts, and file-likes become uploads,
                # everything else is a plain form field.
                for key, value in _iter_data(data):
                    if isinstance(value, (tuple, dict)) or hasattr(value, "read"):
                        self._add_file_from_data(key, value)
                    else:
                        self.form.setlistdefault(key).append(value)

        # Set last so it can override a content type derived from ``data``.
        if mimetype is not None:
            self.mimetype = mimetype
+
    @classmethod
    def from_environ(cls, environ: WSGIEnvironment, **kwargs: t.Any) -> EnvironBuilder:
        """Turn an environ dict back into a builder. Any extra kwargs
        override the args extracted from the environ.

        .. versionchanged:: 2.0
            Path and query values are passed through the WSGI decoding
            dance to avoid double encoding.

        .. versionadded:: 0.15
        """
        headers = Headers(EnvironHeaders(environ))
        out = {
            "path": _wsgi_decoding_dance(environ["PATH_INFO"]),
            # Host, Content-Type, and Content-Length are popped from the
            # headers because the builder derives them from its own
            # attributes when rebuilding the environ.
            "base_url": cls._make_base_url(
                environ["wsgi.url_scheme"],
                headers.pop("Host"),
                _wsgi_decoding_dance(environ["SCRIPT_NAME"]),
            ),
            "query_string": _wsgi_decoding_dance(environ["QUERY_STRING"]),
            "method": environ["REQUEST_METHOD"],
            "input_stream": environ["wsgi.input"],
            "content_type": headers.pop("Content-Type", None),
            "content_length": headers.pop("Content-Length", None),
            "errors_stream": environ["wsgi.errors"],
            "multithread": environ["wsgi.multithread"],
            "multiprocess": environ["wsgi.multiprocess"],
            "run_once": environ["wsgi.run_once"],
            "headers": headers,
        }
        out.update(kwargs)
        return cls(**out)
+
+ def _add_file_from_data(
+ self,
+ key: str,
+ value: (t.IO[bytes] | tuple[t.IO[bytes], str] | tuple[t.IO[bytes], str, str]),
+ ) -> None:
+ """Called in the EnvironBuilder to add files from the data dict."""
+ if isinstance(value, tuple):
+ self.files.add_file(key, *value)
+ else:
+ self.files.add_file(key, value)
+
+ @staticmethod
+ def _make_base_url(scheme: str, host: str, script_root: str) -> str:
+ return urlunsplit((scheme, host, script_root, "", "")).rstrip("/") + "/"
+
    @property
    def base_url(self) -> str:
        """The base URL is used to extract the URL scheme, host name,
        port, and root path.
        """
        return self._make_base_url(self.url_scheme, self.host, self.script_root)

    @base_url.setter
    def base_url(self, value: str | None) -> None:
        # ``None`` resets to the default ``http://localhost/`` base.
        if value is None:
            scheme = "http"
            netloc = "localhost"
            script_root = ""
        else:
            scheme, netloc, script_root, qs, anchor = urlsplit(value)
            if qs or anchor:
                raise ValueError("base url must not contain a query string or fragment")
        # Only the decomposed parts are stored; the getter rebuilds the URL.
        self.script_root = script_root.rstrip("/")
        self.host = netloc
        self.url_scheme = scheme
+
    @property
    def content_type(self) -> str | None:
        """The content type for the request. Reflected from and to
        the :attr:`headers`. Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.
        """
        ct = self.headers.get("Content-Type")
        # Without an explicit header or an input stream, infer the type
        # from whether file uploads or form fields have been added.
        if ct is None and not self._input_stream:
            if self._files:
                return "multipart/form-data"
            if self._form:
                return "application/x-www-form-urlencoded"
            return None
        return ct

    @content_type.setter
    def content_type(self, value: str | None) -> None:
        # ``None`` removes the header so auto detection applies again.
        if value is None:
            self.headers.pop("Content-Type", None)
        else:
            self.headers["Content-Type"] = value
+
+ @property
+ def mimetype(self) -> str | None:
+ """The mimetype (content type without charset etc.)
+
+ .. versionadded:: 0.14
+ """
+ ct = self.content_type
+ return ct.split(";")[0].strip() if ct else None
+
+ @mimetype.setter
+ def mimetype(self, value: str) -> None:
+ self.content_type = get_content_type(value, "utf-8")
+
    @property
    def mimetype_params(self) -> t.Mapping[str, str]:
        """The mimetype parameters as dict. For example if the
        content type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.14
        """

        # Mutating the returned dict re-serializes the Content-Type header
        # through this callback, keeping header and params in sync.
        def on_update(d: CallbackDict[str, str]) -> None:
            self.headers["Content-Type"] = dump_options_header(self.mimetype, d)

        d = parse_options_header(self.headers.get("content-type", ""))[1]
        return CallbackDict(d, on_update)
+
+ @property
+ def content_length(self) -> int | None:
+ """The content length as integer. Reflected from and to the
+ :attr:`headers`. Do not set if you set :attr:`files` or
+ :attr:`form` for auto detection.
+ """
+ return self.headers.get("Content-Length", type=int)
+
+ @content_length.setter
+ def content_length(self, value: int | None) -> None:
+ if value is None:
+ self.headers.pop("Content-Length", None)
+ else:
+ self.headers["Content-Length"] = str(value)
+
+ def _get_form(self, name: str, storage: type[_TAnyMultiDict]) -> _TAnyMultiDict:
+ """Common behavior for getting the :attr:`form` and
+ :attr:`files` properties.
+
+ :param name: Name of the internal cached attribute.
+ :param storage: Storage class used for the data.
+ """
+ if self.input_stream is not None:
+ raise AttributeError("an input stream is defined")
+
+ rv = getattr(self, name)
+
+ if rv is None:
+ rv = storage()
+ setattr(self, name, rv)
+
+ return rv # type: ignore
+
+ def _set_form(self, name: str, value: MultiDict[str, t.Any]) -> None:
+ """Common behavior for setting the :attr:`form` and
+ :attr:`files` properties.
+
+ :param name: Name of the internal cached attribute.
+ :param value: Value to assign to the attribute.
+ """
+ self._input_stream = None
+ setattr(self, name, value)
+
    @property
    def form(self) -> MultiDict[str, str]:
        """A :class:`MultiDict` of form values."""
        # Created lazily; raises AttributeError if an input stream is set.
        return self._get_form("_form", MultiDict)

    @form.setter
    def form(self, value: MultiDict[str, str]) -> None:
        self._set_form("_form", value)
+
    @property
    def files(self) -> FileMultiDict:
        """A :class:`FileMultiDict` of uploaded files. Use
        :meth:`~FileMultiDict.add_file` to add new files.
        """
        # Created lazily; raises AttributeError if an input stream is set.
        return self._get_form("_files", FileMultiDict)

    @files.setter
    def files(self, value: FileMultiDict) -> None:
        self._set_form("_files", value)
+
    @property
    def input_stream(self) -> t.IO[bytes] | None:
        """An optional input stream. This is mutually exclusive with
        setting :attr:`form` and :attr:`files`, setting it will clear
        those. Do not provide this if the method is not ``POST`` or
        another method that has a body.
        """
        return self._input_stream

    @input_stream.setter
    def input_stream(self, value: t.IO[bytes] | None) -> None:
        # Setting a stream invalidates any cached form/file data.
        self._input_stream = value
        self._form = None
        self._files = None
+
+ @property
+ def query_string(self) -> str:
+ """The query string. If you set this to a string
+ :attr:`args` will no longer be available.
+ """
+ if self._query_string is None:
+ if self._args is not None:
+ return _urlencode(self._args)
+ return ""
+ return self._query_string
+
+ @query_string.setter
+ def query_string(self, value: str | None) -> None:
+ self._query_string = value
+ self._args = None
+
    @property
    def args(self) -> MultiDict[str, str]:
        """The URL arguments as :class:`MultiDict`."""
        # A raw query string and parsed args are mutually exclusive.
        if self._query_string is not None:
            raise AttributeError("a query string is defined")
        if self._args is None:
            self._args = MultiDict()
        return self._args

    @args.setter
    def args(self, value: MultiDict[str, str] | None) -> None:
        self._query_string = None
        self._args = value
+
+ @property
+ def server_name(self) -> str:
+ """The server name (read-only, use :attr:`host` to set)"""
+ return self.host.split(":", 1)[0]
+
+ @property
+ def server_port(self) -> int:
+ """The server port as integer (read-only, use :attr:`host` to set)"""
+ pieces = self.host.split(":", 1)
+
+ if len(pieces) == 2:
+ try:
+ return int(pieces[1])
+ except ValueError:
+ pass
+
+ if self.url_scheme == "https":
+ return 443
+ return 80
+
    def __del__(self) -> None:
        # Best-effort cleanup of open files; never raise from a finalizer.
        try:
            self.close()
        except Exception:
            pass
+
+ def close(self) -> None:
+ """Closes all files. If you put real :class:`file` objects into the
+ :attr:`files` dict you can call this method to automatically close
+ them all in one go.
+ """
+ if self.closed:
+ return
+ try:
+ files = self.files.values()
+ except AttributeError:
+ files = () # type: ignore
+ for f in files:
+ try:
+ f.close()
+ except Exception:
+ pass
+ self.closed = True
+
    def get_environ(self) -> WSGIEnvironment:
        """Return the built environ.

        .. versionchanged:: 0.15
            The content type and length headers are set based on
            input stream detection. Previously this only set the WSGI
            keys.
        """
        input_stream = self.input_stream
        content_length = self.content_length

        mimetype = self.mimetype
        content_type = self.content_type

        if input_stream is not None:
            # Measure the remaining length by seeking to the end, then
            # restore the original position so the app reads from there.
            start_pos = input_stream.tell()
            input_stream.seek(0, 2)
            end_pos = input_stream.tell()
            input_stream.seek(start_pos)
            content_length = end_pos - start_pos
        elif mimetype == "multipart/form-data":
            # Encode form fields and files into a multipart body.
            input_stream, content_length, boundary = stream_encode_multipart(
                CombinedMultiDict([self.form, self.files])
            )
            content_type = f'{mimetype}; boundary="{boundary}"'
        elif mimetype == "application/x-www-form-urlencoded":
            form_encoded = _urlencode(self.form).encode("ascii")
            content_length = len(form_encoded)
            input_stream = BytesIO(form_encoded)
        else:
            # No body at all; WSGI still requires a readable stream.
            input_stream = BytesIO()

        result: WSGIEnvironment = {}
        if self.environ_base:
            result.update(self.environ_base)

        def _path_encode(x: str) -> str:
            return _wsgi_encoding_dance(unquote(x))

        raw_uri = _wsgi_encoding_dance(self.request_uri)
        result.update(
            {
                "REQUEST_METHOD": self.method,
                "SCRIPT_NAME": _path_encode(self.script_root),
                "PATH_INFO": _path_encode(self.path),
                "QUERY_STRING": _wsgi_encoding_dance(self.query_string),
                # Non-standard, added by mod_wsgi, uWSGI
                "REQUEST_URI": raw_uri,
                # Non-standard, added by gunicorn
                "RAW_URI": raw_uri,
                "SERVER_NAME": self.server_name,
                "SERVER_PORT": str(self.server_port),
                "HTTP_HOST": self.host,
                "SERVER_PROTOCOL": self.server_protocol,
                "wsgi.version": self.wsgi_version,
                "wsgi.url_scheme": self.url_scheme,
                "wsgi.input": input_stream,
                "wsgi.errors": self.errors_stream,
                "wsgi.multithread": self.multithread,
                "wsgi.multiprocess": self.multiprocess,
                "wsgi.run_once": self.run_once,
            }
        )

        headers = self.headers.copy()
        # Don't send these as headers, they're part of the environ.
        headers.remove("Content-Type")
        headers.remove("Content-Length")

        if content_type is not None:
            result["CONTENT_TYPE"] = content_type

        if content_length is not None:
            result["CONTENT_LENGTH"] = str(content_length)

        # Duplicate header values are joined with ", " into one environ key.
        combined_headers = defaultdict(list)

        for key, value in headers.to_wsgi_list():
            combined_headers[f"HTTP_{key.upper().replace('-', '_')}"].append(value)

        for key, values in combined_headers.items():
            result[key] = ", ".join(values)

        # Overrides are applied last so they win over everything else.
        if self.environ_overrides:
            result.update(self.environ_overrides)

        return result
+
+ def get_request(self, cls: type[Request] | None = None) -> Request:
+ """Returns a request with the data. If the request class is not
+ specified :attr:`request_class` is used.
+
+ :param cls: The request wrapper to use.
+ """
+ if cls is None:
+ cls = self.request_class
+
+ return cls(self.get_environ())
+
+
class ClientRedirectError(Exception):
    """If a redirect loop is detected when using follow_redirects=True with
    the :class:`Client`, then this exception is raised.
    """
+
+
class Client:
    """Simulate sending requests to a WSGI application without running a WSGI or HTTP
    server.

    :param application: The WSGI application to make requests to.
    :param response_wrapper: A :class:`.Response` class to wrap response data with.
        Defaults to :class:`.TestResponse`. If it's not a subclass of ``TestResponse``,
        one will be created.
    :param use_cookies: Persist cookies from ``Set-Cookie`` response headers to the
        ``Cookie`` header in subsequent requests. Domain and path matching is supported,
        but other cookie parameters are ignored.
    :param allow_subdomain_redirects: Allow requests to follow redirects to subdomains.
        Enable this if the application handles subdomains and redirects between them.

    .. versionchanged:: 2.3
        Simplify cookie implementation, support domain and path matching.

    .. versionchanged:: 2.1
        All data is available as properties on the returned response object. The
        response cannot be returned as a tuple.

    .. versionchanged:: 2.0
        ``response_wrapper`` is always a subclass of :class:``TestResponse``.

    .. versionchanged:: 0.5
        Added the ``use_cookies`` parameter.
    """

    def __init__(
        self,
        application: WSGIApplication,
        response_wrapper: type[Response] | None = None,
        use_cookies: bool = True,
        allow_subdomain_redirects: bool = False,
    ) -> None:
        self.application = application

        if response_wrapper in {None, Response}:
            response_wrapper = TestResponse
        elif response_wrapper is not None and not issubclass(
            response_wrapper, TestResponse
        ):
            # Mix the custom response class with TestResponse so the test
            # attributes (request, history, text) are always available.
            response_wrapper = type(
                "WrapperTestResponse",
                (TestResponse, response_wrapper),
                {},
            )

        self.response_wrapper = t.cast(t.Type["TestResponse"], response_wrapper)

        # Cookies are keyed by (domain, path, decoded_key); ``None`` means
        # cookie handling is disabled entirely.
        if use_cookies:
            self._cookies: dict[tuple[str, str, str], Cookie] | None = {}
        else:
            self._cookies = None

        self.allow_subdomain_redirects = allow_subdomain_redirects

    def get_cookie(
        self, key: str, domain: str = "localhost", path: str = "/"
    ) -> Cookie | None:
        """Return a :class:`.Cookie` if it exists. Cookies are uniquely identified by
        ``(domain, path, key)``.

        :param key: The decoded form of the key for the cookie.
        :param domain: The domain the cookie was set for.
        :param path: The path the cookie was set for.

        .. versionadded:: 2.3
        """
        if self._cookies is None:
            raise TypeError(
                "Cookies are disabled. Create a client with 'use_cookies=True'."
            )

        return self._cookies.get((domain, path, key))

    def set_cookie(
        self,
        key: str,
        value: str = "",
        *,
        domain: str = "localhost",
        origin_only: bool = True,
        path: str = "/",
        **kwargs: t.Any,
    ) -> None:
        """Set a cookie to be sent in subsequent requests.

        This is a convenience to skip making a test request to a route that would set
        the cookie. To test the cookie, make a test request to a route that uses the
        cookie value.

        The client uses ``domain``, ``origin_only``, and ``path`` to determine which
        cookies to send with a request. It does not use other cookie parameters that
        browsers use, since they're not applicable in tests.

        :param key: The key part of the cookie.
        :param value: The value part of the cookie.
        :param domain: Send this cookie with requests that match this domain. If
            ``origin_only`` is true, it must be an exact match, otherwise it may be a
            suffix match.
        :param origin_only: Whether the domain must be an exact match to the request.
        :param path: Send this cookie with requests that match this path either exactly
            or as a prefix.
        :param kwargs: Passed to :func:`.dump_cookie`.

        .. versionchanged:: 3.0
            The parameter ``server_name`` is removed. The first parameter is
            ``key``. Use the ``domain`` and ``origin_only`` parameters instead.

        .. versionchanged:: 2.3
            The ``origin_only`` parameter was added.

        .. versionchanged:: 2.3
            The ``domain`` parameter defaults to ``localhost``.
        """
        if self._cookies is None:
            raise TypeError(
                "Cookies are disabled. Create a client with 'use_cookies=True'."
            )

        # Round-trip through a Set-Cookie header so the stored cookie goes
        # through exactly the same parsing as a real response would.
        cookie = Cookie._from_response_header(
            domain, "/", dump_cookie(key, value, domain=domain, path=path, **kwargs)
        )
        cookie.origin_only = origin_only

        if cookie._should_delete:
            self._cookies.pop(cookie._storage_key, None)
        else:
            self._cookies[cookie._storage_key] = cookie

    def delete_cookie(
        self,
        key: str,
        *,
        domain: str = "localhost",
        path: str = "/",
    ) -> None:
        """Delete a cookie if it exists. Cookies are uniquely identified by
        ``(domain, path, key)``.

        :param key: The decoded form of the key for the cookie.
        :param domain: The domain the cookie was set for.
        :param path: The path the cookie was set for.

        .. versionchanged:: 3.0
            The ``server_name`` parameter is removed. The first parameter is
            ``key``. Use the ``domain`` parameter instead.

        .. versionchanged:: 3.0
            The ``secure``, ``httponly`` and ``samesite`` parameters are removed.

        .. versionchanged:: 2.3
            The ``domain`` parameter defaults to ``localhost``.
        """
        if self._cookies is None:
            raise TypeError(
                "Cookies are disabled. Create a client with 'use_cookies=True'."
            )

        self._cookies.pop((domain, path, key), None)

    def _add_cookies_to_wsgi(self, environ: WSGIEnvironment) -> None:
        """If cookies are enabled, set the ``Cookie`` header in the environ to the
        cookies that are applicable to the request host and path.

        :meta private:

        .. versionadded:: 2.3
        """
        if self._cookies is None:
            return

        url = urlsplit(get_current_url(environ))
        server_name = url.hostname or "localhost"
        value = "; ".join(
            c._to_request_header()
            for c in self._cookies.values()
            if c._matches_request(server_name, url.path)
        )

        # Remove a stale header when no stored cookie matches this request.
        if value:
            environ["HTTP_COOKIE"] = value
        else:
            environ.pop("HTTP_COOKIE", None)

    def _update_cookies_from_response(
        self, server_name: str, path: str, headers: list[str]
    ) -> None:
        """If cookies are enabled, update the stored cookies from any ``Set-Cookie``
        headers in the response.

        :meta private:

        .. versionadded:: 2.3
        """
        if self._cookies is None:
            return

        for header in headers:
            cookie = Cookie._from_response_header(server_name, path, header)

            # An expired/max-age=0 cookie is a deletion request.
            if cookie._should_delete:
                self._cookies.pop(cookie._storage_key, None)
            else:
                self._cookies[cookie._storage_key] = cookie

    def run_wsgi_app(
        self, environ: WSGIEnvironment, buffered: bool = False
    ) -> tuple[t.Iterable[bytes], str, Headers]:
        """Runs the wrapped WSGI app with the given environment.

        :meta private:
        """
        self._add_cookies_to_wsgi(environ)
        rv = run_wsgi_app(self.application, environ, buffered=buffered)
        url = urlsplit(get_current_url(environ))
        self._update_cookies_from_response(
            url.hostname or "localhost", url.path, rv[2].getlist("Set-Cookie")
        )
        return rv

    def resolve_redirect(
        self, response: TestResponse, buffered: bool = False
    ) -> TestResponse:
        """Perform a new request to the location given by the redirect
        response to the previous request.

        :meta private:
        """
        scheme, netloc, path, qs, anchor = urlsplit(response.location)
        builder = EnvironBuilder.from_environ(
            response.request.environ, path=path, query_string=qs
        )

        to_name_parts = netloc.split(":", 1)[0].split(".")
        from_name_parts = builder.server_name.split(".")

        if to_name_parts != [""]:
            # The new location has a host, use it for the base URL.
            builder.url_scheme = scheme
            builder.host = netloc
        else:
            # A local redirect with autocorrect_location_header=False
            # doesn't have a host, so use the request's host.
            to_name_parts = from_name_parts

        # Explain why a redirect to a different server name won't be followed.
        if to_name_parts != from_name_parts:
            if to_name_parts[-len(from_name_parts) :] == from_name_parts:
                if not self.allow_subdomain_redirects:
                    raise RuntimeError("Following subdomain redirects is not enabled.")
            else:
                raise RuntimeError("Following external redirects is not supported.")

        path_parts = path.split("/")
        root_parts = builder.script_root.split("/")

        if path_parts[: len(root_parts)] == root_parts:
            # Strip the script root from the path.
            builder.path = path[len(builder.script_root) :]
        else:
            # The new location is not under the script root, so use the
            # whole path and clear the previous root.
            builder.path = path
            builder.script_root = ""

        # Only 307 and 308 preserve all of the original request.
        if response.status_code not in {307, 308}:
            # HEAD is preserved, everything else becomes GET.
            if builder.method != "HEAD":
                builder.method = "GET"

            # Clear the body and the headers that describe it.

            if builder.input_stream is not None:
                builder.input_stream.close()
                builder.input_stream = None

            builder.content_type = None
            builder.content_length = None
            builder.headers.pop("Transfer-Encoding", None)

        return self.open(builder, buffered=buffered)

    def open(
        self,
        *args: t.Any,
        buffered: bool = False,
        follow_redirects: bool = False,
        **kwargs: t.Any,
    ) -> TestResponse:
        """Generate an environ dict from the given arguments, make a
        request to the application using it, and return the response.

        :param args: Passed to :class:`EnvironBuilder` to create the
            environ for the request. If a single arg is passed, it can
            be an existing :class:`EnvironBuilder` or an environ dict.
        :param buffered: Convert the iterator returned by the app into
            a list. If the iterator has a ``close()`` method, it is
            called automatically.
        :param follow_redirects: Make additional requests to follow HTTP
            redirects until a non-redirect status is returned.
            :attr:`TestResponse.history` lists the intermediate
            responses.

        .. versionchanged:: 2.1
            Removed the ``as_tuple`` parameter.

        .. versionchanged:: 2.0
            The request input stream is closed when calling
            ``response.close()``. Input streams for redirects are
            automatically closed.

        .. versionchanged:: 0.5
            If a dict is provided as file in the dict for the ``data``
            parameter the content type has to be called ``content_type``
            instead of ``mimetype``. This change was made for
            consistency with :class:`werkzeug.FileWrapper`.

        .. versionchanged:: 0.5
            Added the ``follow_redirects`` parameter.
        """
        request: Request | None = None

        # A single positional argument may already be a builder, an environ
        # dict, or a Request; anything else goes through EnvironBuilder.
        if not kwargs and len(args) == 1:
            arg = args[0]

            if isinstance(arg, EnvironBuilder):
                request = arg.get_request()
            elif isinstance(arg, dict):
                request = EnvironBuilder.from_environ(arg).get_request()
            elif isinstance(arg, Request):
                request = arg

        if request is None:
            builder = EnvironBuilder(*args, **kwargs)

            try:
                request = builder.get_request()
            finally:
                builder.close()

        response_parts = self.run_wsgi_app(request.environ, buffered=buffered)
        response = self.response_wrapper(*response_parts, request=request)

        # Track (location, status) pairs already seen to detect loops.
        redirects = set()
        history: list[TestResponse] = []

        if not follow_redirects:
            return response

        while response.status_code in {
            301,
            302,
            303,
            305,
            307,
            308,
        }:
            # Exhaust intermediate response bodies to ensure middleware
            # that returns an iterator runs any cleanup code.
            if not buffered:
                response.make_sequence()
                response.close()

            new_redirect_entry = (response.location, response.status_code)

            if new_redirect_entry in redirects:
                raise ClientRedirectError(
                    f"Loop detected: A {response.status_code} redirect"
                    f" to {response.location} was already made."
                )

            redirects.add(new_redirect_entry)
            response.history = tuple(history)
            history.append(response)
            response = self.resolve_redirect(response, buffered=buffered)
        else:
            # This is the final request after redirects.
            response.history = tuple(history)
            # Close the input stream when closing the response, in case
            # the input is an open temporary file.
            response.call_on_close(request.input_stream.close)
            return response

    def get(self, *args: t.Any, **kw: t.Any) -> TestResponse:
        """Call :meth:`open` with ``method`` set to ``GET``."""
        kw["method"] = "GET"
        return self.open(*args, **kw)

    def post(self, *args: t.Any, **kw: t.Any) -> TestResponse:
        """Call :meth:`open` with ``method`` set to ``POST``."""
        kw["method"] = "POST"
        return self.open(*args, **kw)

    def put(self, *args: t.Any, **kw: t.Any) -> TestResponse:
        """Call :meth:`open` with ``method`` set to ``PUT``."""
        kw["method"] = "PUT"
        return self.open(*args, **kw)

    def delete(self, *args: t.Any, **kw: t.Any) -> TestResponse:
        """Call :meth:`open` with ``method`` set to ``DELETE``."""
        kw["method"] = "DELETE"
        return self.open(*args, **kw)

    def patch(self, *args: t.Any, **kw: t.Any) -> TestResponse:
        """Call :meth:`open` with ``method`` set to ``PATCH``."""
        kw["method"] = "PATCH"
        return self.open(*args, **kw)

    def options(self, *args: t.Any, **kw: t.Any) -> TestResponse:
        """Call :meth:`open` with ``method`` set to ``OPTIONS``."""
        kw["method"] = "OPTIONS"
        return self.open(*args, **kw)

    def head(self, *args: t.Any, **kw: t.Any) -> TestResponse:
        """Call :meth:`open` with ``method`` set to ``HEAD``."""
        kw["method"] = "HEAD"
        return self.open(*args, **kw)

    def trace(self, *args: t.Any, **kw: t.Any) -> TestResponse:
        """Call :meth:`open` with ``method`` set to ``TRACE``."""
        kw["method"] = "TRACE"
        return self.open(*args, **kw)

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.application!r}>"
+
+
def create_environ(*args: t.Any, **kwargs: t.Any) -> WSGIEnvironment:
    """Create a new WSGI environ dict based on the values passed. The first
    parameter should be the path of the request which defaults to '/'. The
    second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc port and
    the path to the script.

    This accepts the same arguments as the :class:`EnvironBuilder`
    constructor.

    .. versionchanged:: 0.5
        This function is now a thin wrapper over :class:`EnvironBuilder` which
        was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
        and `charset` parameters were added.
    """
    builder = EnvironBuilder(*args, **kwargs)

    # The builder is closed even if get_environ() raises, so any files
    # added via ``data`` are not leaked.
    try:
        return builder.get_environ()
    finally:
        builder.close()
+
+
def run_wsgi_app(
    app: WSGIApplication, environ: WSGIEnvironment, buffered: bool = False
) -> tuple[t.Iterable[bytes], str, Headers]:
    """Return a tuple in the form (app_iter, status, headers) of the
    application output. This works best if you pass it an application that
    returns an iterator all the time.

    Sometimes applications may use the `write()` callable returned
    by the `start_response` function. This tries to resolve such edge
    cases automatically. But if you don't get the expected output you
    should set `buffered` to `True` which enforces buffering.

    If passed an invalid WSGI application the behavior of this function is
    undefined. Never pass non-conforming WSGI applications to this function.

    :param app: the application to execute.
    :param buffered: set to `True` to enforce buffering.
    :return: tuple in the form ``(app_iter, status, headers)``
    """
    # Copy environ to ensure any mutations by the app (ProxyFix, for
    # example) don't affect subsequent requests (such as redirects).
    environ = _get_environ(environ).copy()
    status: str
    response: tuple[str, list[tuple[str, str]]] | None = None
    buffer: list[bytes] = []

    def start_response(status, headers, exc_info=None):  # type: ignore
        nonlocal response

        if exc_info:
            try:
                raise exc_info[1].with_traceback(exc_info[2])
            finally:
                # Break the traceback reference cycle (PEP 3333).
                exc_info = None

        response = (status, headers)
        # The returned write() callable collects into the same buffer that
        # the iterated body is chained with below.
        return buffer.append

    app_rv = app(environ, start_response)
    close_func = getattr(app_rv, "close", None)
    app_iter: t.Iterable[bytes] = iter(app_rv)

    # when buffering we emit the close call early and convert the
    # application iterator into a regular list
    if buffered:
        try:
            app_iter = list(app_iter)
        finally:
            if close_func is not None:
                close_func()

    # otherwise we iterate the application iter until we have a response, chain
    # the already received data with the already collected data and wrap it in
    # a new `ClosingIterator` if we need to restore a `close` callable from the
    # original return value.
    else:
        for item in app_iter:
            buffer.append(item)

            if response is not None:
                break

        if buffer:
            app_iter = chain(buffer, app_iter)

        if close_func is not None and app_iter is not app_rv:
            app_iter = ClosingIterator(app_iter, close_func)

    status, headers = response  # type: ignore
    return app_iter, status, Headers(headers)
+
+
class TestResponse(Response):
    """:class:`~werkzeug.wrappers.Response` subclass that provides extra
    information about requests made with the test :class:`Client`.

    Test client requests will always return an instance of this class.
    If a custom response class is passed to the client, it is
    subclassed along with this to support test information.

    If the test request included large files, or if the application is
    serving a file, call :meth:`close` to close any open files and
    prevent Python showing a ``ResourceWarning``.

    .. versionchanged:: 2.2
        Set the ``default_mimetype`` to None to prevent a mimetype being
        assumed if missing.

    .. versionchanged:: 2.1
        Response instances cannot be treated as tuples.

    .. versionadded:: 2.0
        Test client methods always return instances of this class.
    """

    default_mimetype = None
    # Don't assume a mimetype, instead use whatever the response provides

    request: Request
    """A request object with the environ used to make the request that
    resulted in this response.
    """

    history: tuple[TestResponse, ...]
    """A list of intermediate responses. Populated when the test request
    is made with ``follow_redirects`` enabled.
    """

    # Tell Pytest to ignore this, it's not a test class.
    __test__ = False

    def __init__(
        self,
        response: t.Iterable[bytes],
        status: str,
        headers: Headers,
        request: Request,
        history: tuple[TestResponse] = (),  # type: ignore
        **kwargs: t.Any,
    ) -> None:
        super().__init__(response, status, headers, **kwargs)
        self.request = request
        self.history = history
        # Kept for compatibility with code that unpacked the old tuple form.
        self._compat_tuple = response, status, headers

    @cached_property
    def text(self) -> str:
        """The response data as text. A shortcut for
        ``response.get_data(as_text=True)``.

        .. versionadded:: 2.1
        """
        return self.get_data(as_text=True)
+
@dataclasses.dataclass
class Cookie:
    """A cookie key, value, and parameters.

    The class itself is not a public API. Its attributes are documented for inspection
    with :meth:`.Client.get_cookie` only.

    .. versionadded:: 2.3
    """

    key: str
    """The cookie key, encoded as a client would see it."""

    value: str
    """The cookie value, encoded as a client would see it."""

    decoded_key: str
    """The cookie key, decoded as the application would set and see it."""

    decoded_value: str
    """The cookie value, decoded as the application would set and see it."""

    expires: datetime | None
    """The time at which the cookie is no longer valid."""

    max_age: int | None
    """The number of seconds from when the cookie was set at which it is
    no longer valid.
    """

    domain: str
    """The domain that the cookie was set for, or the request domain if not set."""

    origin_only: bool
    """Whether the cookie will be sent for exact domain matches only. This is ``True``
    if the ``Domain`` parameter was not present.
    """

    path: str
    """The path that the cookie was set for."""

    secure: bool | None
    """The ``Secure`` parameter."""

    http_only: bool | None
    """The ``HttpOnly`` parameter."""

    same_site: str | None
    """The ``SameSite`` parameter."""

    def _matches_request(self, server_name: str, path: str) -> bool:
        # Domain: exact match always counts; otherwise allow a suffix match
        # at a "." boundary when the cookie is not origin-only.
        # Path: exact match, or a prefix match at a "/" boundary (the
        # ``- self.path.endswith("/")`` adjusts for a trailing slash).
        return (
            server_name == self.domain
            or (
                not self.origin_only
                and server_name.endswith(self.domain)
                and server_name[: -len(self.domain)].endswith(".")
            )
        ) and (
            path == self.path
            or (
                path.startswith(self.path)
                and path[len(self.path) - self.path.endswith("/") :].startswith("/")
            )
        )

    def _to_request_header(self) -> str:
        # Only key=value is sent back to the app; parameters are client-side.
        return f"{self.key}={self.value}"

    @classmethod
    def _from_response_header(cls, server_name: str, path: str, header: str) -> te.Self:
        """Parse a ``Set-Cookie`` header value into a :class:`Cookie`,
        filling unset parameters from the request ``server_name``/``path``.
        """
        header, _, parameters_str = header.partition(";")
        key, _, value = header.partition("=")
        decoded_key, decoded_value = next(parse_cookie(header).items())
        params = {}

        # Parameter names are case-insensitive; value-less flags map to None.
        for item in parameters_str.split(";"):
            k, sep, v = item.partition("=")
            params[k.strip().lower()] = v.strip() if sep else None

        return cls(
            key=key.strip(),
            value=value.strip(),
            decoded_key=decoded_key,
            decoded_value=decoded_value,
            expires=parse_date(params.get("expires")),
            max_age=int(params["max-age"] or 0) if "max-age" in params else None,
            domain=params.get("domain") or server_name,
            origin_only="domain" not in params,
            path=params.get("path") or path.rpartition("/")[0] or "/",
            secure="secure" in params,
            http_only="httponly" in params,
            same_site=params.get("samesite"),
        )

    @property
    def _storage_key(self) -> tuple[str, str, str]:
        return self.domain, self.path, self.decoded_key

    @property
    def _should_delete(self) -> bool:
        # Max-Age=0 or an epoch Expires means "delete this cookie".
        return self.max_age == 0 or (
            self.expires is not None and self.expires.timestamp() == 0
        )
diff --git a/venv/lib/python3.8/site-packages/werkzeug/testapp.py b/venv/lib/python3.8/site-packages/werkzeug/testapp.py
new file mode 100644
index 0000000..cdf7fac
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/testapp.py
@@ -0,0 +1,194 @@
+"""A small application that can be used to test a WSGI server and check
+it for WSGI compliance.
+"""
+
+from __future__ import annotations
+
+import importlib.metadata
+import os
+import sys
+import typing as t
+from textwrap import wrap
+
+from markupsafe import escape
+
+from .wrappers.request import Request
+from .wrappers.response import Response
+
# HTML skeleton rendered by test_app via old-style "%" formatting; literal
# "%" characters in the CSS are doubled ("%%") so they survive the substitution.
TEMPLATE = """\
<!doctype html>
<html lang=en>
<title>WSGI Information</title>
<style type="text/css">
  @import url(https://fonts.googleapis.com/css?family=Ubuntu);

  body       { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
               'Verdana', sans-serif; background-color: white; color: #000;
               font-size: 15px; text-align: center; }
  div.box    { text-align: left; width: 45em; margin: auto; padding: 50px 0;
               background-color: white; }
  h1, h2     { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
               'Geneva', 'Verdana', sans-serif; font-weight: normal; }
  h1         { margin: 0 0 30px 0; }
  h2         { font-size: 1.4em; margin: 1em 0 0.5em 0; }
  table      { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
  table th   { background-color: #AFC1C4; color: white; font-size: 0.72em;
               font-weight: normal; width: 18em; vertical-align: top;
               padding: 0.5em 0 0.1em 0.5em; }
  table td   { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
  code       { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
               monospace; font-size: 0.7em; }
  ul li      { line-height: 1.5em; }
  ul.path    { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
               list-style: none; background: #E8EFF0; }
  ul.path li { line-height: 1.6em; }
  li.virtual { color: #999; text-decoration: underline; }
  li.exp     { background: white; }
</style>
<div class="box">
  <h1>WSGI Information</h1>
  <p>
    This page displays all available information about the WSGI server and
    the underlying Python interpreter.
  <h2 id="python-interpreter">Python Interpreter</h2>
  <table>
    <tr>
      <th>Python Version
      <td>%(python_version)s
    <tr>
      <th>Platform
      <td>%(platform)s [%(os)s]
    <tr>
      <th>API Version
      <td>%(api_version)s
    <tr>
      <th>Byteorder
      <td>%(byteorder)s
    <tr>
      <th>Werkzeug Version
      <td>%(werkzeug_version)s
  </table>
  <h2 id="wsgi-environment">WSGI Environment</h2>
  <table>%(wsgi_env)s</table>
  <h2 id="installed-eggs">Installed Eggs</h2>
  <p>
    The following python packages were installed on the system as
    Python eggs:
  <ul>%(python_eggs)s</ul>
  <h2 id="sys-path">System Path</h2>
  <p>
    The following paths are the current contents of the load path. The
    following entries are looked up for Python packages. Note that not
    all items in this path are folders. Gray and underlined items are
    entries pointing to invalid resources or used by custom import hooks
    such as the zip importer.
  <p>
    Items with a bright background were expanded for display from a relative
    path. If you encounter such paths in the output you might want to check
    your setup as relative paths are usually problematic in multithreaded
    environments.
  <ul class="path">%(sys_path)s</ul>
</div>
"""
+
+
def iter_sys_path() -> t.Iterator[tuple[str, bool, bool]]:
    """Yield ``(display_path, is_virtual, was_expanded)`` for each entry on
    ``sys.path``.

    ``display_path`` abbreviates the home directory to ``~`` on POSIX,
    ``is_virtual`` is true when the entry is not an existing directory, and
    ``was_expanded`` is true when the entry was relative and had to be joined
    with the current working directory for display.
    """
    if os.name == "posix":
        home = os.path.expanduser("~")

        def strip(x: str) -> str:
            return f"~{x[len(home) :]}" if x.startswith(home) else x

    else:

        def strip(x: str) -> str:
            return x

    cwd = os.path.abspath(os.getcwd())

    for entry in sys.path:
        # An empty entry means the current directory.
        full = os.path.join(cwd, entry or os.path.curdir)
        yield strip(os.path.normpath(full)), not os.path.isdir(full), full != entry
+
+
@Request.application
def test_app(req: Request) -> Response:
    """Simple test application that dumps the environment. You can use
    it to check if Werkzeug is working properly:

    .. sourcecode:: pycon

        >>> from werkzeug.serving import run_simple
        >>> from werkzeug.testapp import test_app
        >>> run_simple('localhost', 3000, test_app)
         * Running on http://localhost:3000/

    The application displays important information from the WSGI environment,
    the Python interpreter and the installed libraries.
    """
    # pkg_resources is optional; without it the eggs section is empty.
    try:
        import pkg_resources
    except ImportError:
        eggs: t.Iterable[t.Any] = ()
    else:
        eggs = sorted(pkg_resources.working_set, key=lambda d: d.project_name.lower())

    egg_rows = []

    for dist in eggs:
        try:
            version = dist.version
        except (ValueError, AttributeError):
            version = "unknown"

        egg_rows.append(
            f"<li>{escape(dist.project_name)} <small>[{escape(version)}]</small>"
        )

    env_rows = []

    for key, value in sorted(req.environ.items(), key=lambda kv: repr(kv[0]).lower()):
        # Wrap long repr values so the table stays readable.
        wrapped = "".join(wrap(str(escape(repr(value)))))
        env_rows.append(f"<tr><th>{escape(key)}<td><code>{wrapped}</code>")

    path_rows = []

    for item, virtual, expanded in iter_sys_path():
        classes = [
            name for flag, name in ((virtual, "virtual"), (expanded, "exp")) if flag
        ]
        class_str = f' class="{" ".join(classes)}"' if classes else ""
        path_rows.append(f"<li{class_str}>{escape(item)}")

    context = {
        "python_version": "<br>".join(escape(sys.version).splitlines()),
        "platform": escape(sys.platform),
        "os": escape(os.name),
        "api_version": sys.api_version,
        "byteorder": sys.byteorder,
        "werkzeug_version": _get_werkzeug_version(),
        "python_eggs": "\n".join(egg_rows),
        "wsgi_env": "\n".join(env_rows),
        "sys_path": "\n".join(path_rows),
    }
    return Response(TEMPLATE % context, mimetype="text/html")
+
+
# Cached result of the metadata lookup; the empty string means "not yet read".
_werkzeug_version = ""


def _get_werkzeug_version() -> str:
    """Return the installed Werkzeug version string, caching the
    ``importlib.metadata`` lookup after the first call.
    """
    global _werkzeug_version

    if not _werkzeug_version:
        _werkzeug_version = importlib.metadata.version("werkzeug")

    return _werkzeug_version
+
+
if __name__ == "__main__":
    # Allow running this module directly to serve the info page locally.
    from .serving import run_simple

    run_simple("localhost", 5000, test_app, use_reloader=True)
diff --git a/venv/lib/python3.8/site-packages/werkzeug/urls.py b/venv/lib/python3.8/site-packages/werkzeug/urls.py
new file mode 100644
index 0000000..5bffe39
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/urls.py
@@ -0,0 +1,203 @@
+from __future__ import annotations
+
+import codecs
+import re
+import typing as t
+import urllib.parse
+from urllib.parse import quote
+from urllib.parse import unquote
+from urllib.parse import urlencode
+from urllib.parse import urlsplit
+from urllib.parse import urlunsplit
+
+from .datastructures import iter_multi_items
+
+
def _codec_error_url_quote(e: UnicodeError) -> tuple[str, int]:
    """Codec error handler used in :func:`uri_to_iri`: bytes that fail to
    decode are re-quoted instead of being replaced or dropped.
    """
    # The docs state that UnicodeError does have these attributes,
    # but mypy isn't picking them up.
    bad_bytes = e.object[e.start : e.end]  # type: ignore
    return quote(bad_bytes, safe=""), e.end  # type: ignore


codecs.register_error("werkzeug.url_quote", _codec_error_url_quote)
+
+
def _make_unquote_part(name: str, chars: str) -> t.Callable[[str], str]:
    """Build a partial unquoter: it percent-decodes everything except the
    escapes of the characters in ``chars``, which stay quoted so the
    meaning of that part of the URL is unchanged.
    """
    escapes = "|".join(f"{ord(c):02X}" for c in sorted(chars))
    keep_quoted = re.compile(f"((?:%(?:{escapes}))+)", re.I)

    def _unquote_partial(value: str) -> str:
        # Splitting on a capturing group alternates plain text (which is
        # unquoted) with runs of protected escapes (kept verbatim).
        pieces = iter(keep_quoted.split(value))
        result = []

        for plain in pieces:
            result.append(unquote(plain, "utf-8", "werkzeug.url_quote"))
            result.append(next(pieces, ""))

        return "".join(result)

    _unquote_partial.__name__ = f"_unquote_{name}"
    return _unquote_partial
+
+
# Characters that should remain quoted in URL parts, based on
# https://url.spec.whatwg.org/#percent-encoded-bytes.
# All controls, space, and "%" always stay quoted.
_always_unsafe = bytes((*range(0x21), 0x25, 0x7F)).decode()
# One unquoter per URL part; each additionally protects the delimiters
# that are significant for that part.
_unquote_fragment = _make_unquote_part("fragment", _always_unsafe)
_unquote_query = _make_unquote_part("query", _always_unsafe + "&=+#")
_unquote_path = _make_unquote_part("path", _always_unsafe + "/?#")
_unquote_user = _make_unquote_part("user", _always_unsafe + ":@/?#")
+
+
def uri_to_iri(uri: str) -> str:
    """Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
    leaving all reserved and invalid characters quoted. If the URL has
    a domain, it is decoded from Punycode.

    >>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
    'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'

    :param uri: The URI to convert.

    .. versionchanged:: 3.0
        Passing a tuple or bytes, and the ``charset`` and ``errors`` parameters,
        are removed.

    .. versionchanged:: 2.3
        Which characters remain quoted is specific to each part of the URL.

    .. versionchanged:: 0.15
        All reserved and invalid characters remain quoted. Previously,
        only some reserved characters were preserved, and invalid bytes
        were replaced instead of left quoted.

    .. versionadded:: 0.6
    """
    parts = urlsplit(uri)

    host = parts.hostname
    netloc = _decode_idna(host) if host else ""

    # IPv6 literals contain ":" and must be bracketed.
    if ":" in netloc:
        netloc = f"[{netloc}]"

    if parts.port:
        netloc = f"{netloc}:{parts.port}"

    if parts.username:
        auth = _unquote_user(parts.username)

        if parts.password:
            auth = f"{auth}:{_unquote_user(parts.password)}"

        netloc = f"{auth}@{netloc}"

    return urlunsplit(
        (
            parts.scheme,
            netloc,
            _unquote_path(parts.path),
            _unquote_query(parts.query),
            _unquote_fragment(parts.fragment),
        )
    )
+
+
def iri_to_uri(iri: str) -> str:
    """Convert an IRI to a URI. All non-ASCII and unsafe characters are
    quoted. If the URL has a domain, it is encoded to Punycode.

    >>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
    'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'

    :param iri: The IRI to convert.

    .. versionchanged:: 3.0
        Passing a tuple or bytes, the ``charset`` and ``errors`` parameters,
        and the ``safe_conversion`` parameter, are removed.

    .. versionchanged:: 2.3
        Which characters remain unquoted is specific to each part of the URL.

    .. versionchanged:: 0.15
        All reserved characters remain unquoted. Previously, only some reserved
        characters were left unquoted.

    .. versionchanged:: 0.9.6
        The ``safe_conversion`` parameter was added.

    .. versionadded:: 0.6
    """
    parts = urlsplit(iri)

    # safe = https://url.spec.whatwg.org/#url-path-segment-string, plus "%"
    # so already-quoted sequences are not quoted again.
    path = quote(parts.path, safe="%!$&'()*+,/:;=@")
    query = quote(parts.query, safe="%!$&'()*+,/:;=?@")
    fragment = quote(parts.fragment, safe="%!#$&'()*+,/:;=?@")

    host = parts.hostname
    netloc = host.encode("idna").decode("ascii") if host else ""

    # IPv6 literals contain ":" and must be bracketed.
    if ":" in netloc:
        netloc = f"[{netloc}]"

    if parts.port:
        netloc = f"{netloc}:{parts.port}"

    if parts.username:
        userinfo_safe = "%!$&'()*+,;="
        auth = quote(parts.username, safe=userinfo_safe)

        if parts.password:
            auth = f"{auth}:{quote(parts.password, safe=userinfo_safe)}"

        netloc = f"{auth}@{netloc}"

    return urlunsplit((parts.scheme, netloc, path, query, fragment))
+
+
# Python < 3.12
# itms-services was worked around in previous iri_to_uri implementations, but
# we can tell Python directly that it needs to preserve the "//" by
# registering the scheme as one that uses a netloc.
if "itms-services" not in urllib.parse.uses_netloc:
    urllib.parse.uses_netloc.append("itms-services")
+
+
def _decode_idna(domain: str) -> str:
    """Decode a Punycode domain to its Unicode form. Labels that cannot be
    decoded are left as-is; a domain that is already non-ASCII is returned
    unchanged.
    """
    try:
        data = domain.encode("ascii")
    except UnicodeEncodeError:
        # Not ASCII, so it cannot be Punycode; it's already decoded.
        return domain

    try:
        # Fast path: decode the whole domain at once.
        return data.decode("idna")
    except UnicodeDecodeError:
        pass

    # Fall back to decoding each label separately, keeping invalid ones.
    labels = []

    for label in data.split(b"."):
        try:
            labels.append(label.decode("idna"))
        except UnicodeDecodeError:
            labels.append(label.decode("ascii"))

    return ".".join(labels)
+
+
def _urlencode(query: t.Mapping[str, str] | t.Iterable[tuple[str, str]]) -> str:
    """Encode a mapping or iterable of pairs as a query string, skipping
    pairs whose value is ``None``.
    """
    # safe = https://url.spec.whatwg.org/#percent-encoded-bytes
    pairs = [(key, value) for key, value in iter_multi_items(query) if value is not None]
    return urlencode(pairs, safe="!$'()*,/:;?@")
diff --git a/venv/lib/python3.8/site-packages/werkzeug/user_agent.py b/venv/lib/python3.8/site-packages/werkzeug/user_agent.py
new file mode 100644
index 0000000..17e5d3f
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/user_agent.py
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+
class UserAgent:
    """Represents a parsed user agent header value.

    The default implementation does no parsing, only the :attr:`string`
    attribute is set. A subclass may parse the string to set the
    common attributes or expose other information. Set
    :attr:`werkzeug.wrappers.Request.user_agent_class` to use a
    subclass.

    :param string: The header value to parse.

    .. versionadded:: 2.0
        This replaces the previous ``useragents`` module, but does not
        provide a built-in parser.
    """

    platform: str | None = None
    """The OS name, if it could be parsed from the string."""

    browser: str | None = None
    """The browser name, if it could be parsed from the string."""

    version: str | None = None
    """The browser version, if it could be parsed from the string."""

    language: str | None = None
    """The browser language, if it could be parsed from the string."""

    def __init__(self, string: str) -> None:
        self.string: str = string
        """The original header value."""

    def __str__(self) -> str:
        return self.string

    def __repr__(self) -> str:
        name = type(self).__name__
        return f"<{name} {self.browser}/{self.version}>"

    def __bool__(self) -> bool:
        # Truthy only when a browser was successfully parsed.
        return bool(self.browser)

    def to_header(self) -> str:
        """Convert to a header value."""
        return self.string
diff --git a/venv/lib/python3.8/site-packages/werkzeug/utils.py b/venv/lib/python3.8/site-packages/werkzeug/utils.py
new file mode 100644
index 0000000..59b97b7
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/utils.py
@@ -0,0 +1,691 @@
+from __future__ import annotations
+
+import io
+import mimetypes
+import os
+import pkgutil
+import re
+import sys
+import typing as t
+import unicodedata
+from datetime import datetime
+from time import time
+from urllib.parse import quote
+from zlib import adler32
+
+from markupsafe import escape
+
+from ._internal import _DictAccessorProperty
+from ._internal import _missing
+from ._internal import _TAccessorValue
+from .datastructures import Headers
+from .exceptions import NotFound
+from .exceptions import RequestedRangeNotSatisfiable
+from .security import safe_join
+from .wsgi import wrap_file
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import WSGIEnvironment
+
+ from .wrappers.request import Request
+ from .wrappers.response import Response
+
_T = t.TypeVar("_T")

# Matches an HTML/XML entity such as "&amp;" (name captured without "&;").
_entity_re = re.compile(r"&([^;]+);")
# Characters removed from filenames by secure_filename (everything that is
# not an ASCII letter, digit, "_", "." or "-").
_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
# Reserved device names on Windows that must not be used as filenames.
_windows_device_files = {
    "CON",
    "PRN",
    "AUX",
    "NUL",
    *(f"COM{i}" for i in range(10)),
    *(f"LPT{i}" for i in range(10)),
}
+
+
class cached_property(property, t.Generic[_T]):
    """A :func:`property` that is only evaluated once. Subsequent access
    returns the cached value. Setting the property sets the cached
    value. Deleting the property clears the cached value, accessing it
    again will evaluate it again.

    .. code-block:: python

        class Example:
            @cached_property
            def value(self):
                # calculate something important here
                return 42

        e = Example()
        e.value  # evaluates
        e.value  # uses cache
        e.value = 16  # sets cache
        del e.value  # clears cache

    If the class defines ``__slots__``, it must add ``_cache_{name}`` as
    a slot. Alternatively, it can add ``__dict__``, but that's usually
    not desirable.

    .. versionchanged:: 2.1
        Works with ``__slots__``.

    .. versionchanged:: 2.0
        ``del obj.name`` clears the cached value.
    """

    def __init__(
        self,
        fget: t.Callable[[t.Any], _T],
        name: str | None = None,
        doc: str | None = None,
    ) -> None:
        super().__init__(fget, doc=doc)
        self.__name__ = name or fget.__name__
        # Slotted classes store the cached value under this attribute name
        # instead of in __dict__.
        self.slot_name = f"_cache_{self.__name__}"
        self.__module__ = fget.__module__

    def __set__(self, obj: object, value: _T) -> None:
        """Store ``value`` as the cached value, bypassing the getter."""
        if hasattr(obj, "__dict__"):
            obj.__dict__[self.__name__] = value
        else:
            setattr(obj, self.slot_name, value)

    def __get__(self, obj: object, type: type = None) -> _T:  # type: ignore
        # Accessed on the class rather than an instance: return the
        # descriptor itself, like plain `property` does.
        if obj is None:
            return self  # type: ignore

        obj_dict = getattr(obj, "__dict__", None)

        if obj_dict is not None:
            value: _T = obj_dict.get(self.__name__, _missing)
        else:
            value = getattr(obj, self.slot_name, _missing)  # type: ignore[arg-type]

        # Cache miss: evaluate the wrapped getter once and store the result.
        if value is _missing:
            value = self.fget(obj)  # type: ignore

            if obj_dict is not None:
                obj.__dict__[self.__name__] = value
            else:
                setattr(obj, self.slot_name, value)

        return value

    def __delete__(self, obj: object) -> None:
        """Clear the cached value so the next access re-evaluates."""
        if hasattr(obj, "__dict__"):
            del obj.__dict__[self.__name__]
        else:
            # A slot attribute can't always be deleted; reset to the sentinel.
            setattr(obj, self.slot_name, _missing)
+
+
class environ_property(_DictAccessorProperty[_TAccessorValue]):
    """Maps request attributes to environment variables. This works not only
    for the Werkzeug request object, but also any other class with an
    environ attribute:

    >>> class Test(object):
    ...     environ = {'key': 'value'}
    ...     test = environ_property('key')
    >>> var = Test()
    >>> var.test
    'value'

    If you pass it a second value it's used as default if the key does not
    exist, the third one can be a converter that takes a value and converts
    it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value
    is used. If no default value is provided `None` is used.

    Per default the property is read only. You have to explicitly enable it
    by passing ``read_only=False`` to the constructor.
    """

    # Assignment raises unless the constructor is given read_only=False.
    read_only = True

    def lookup(self, obj: Request) -> WSGIEnvironment:
        # The accessor reads from the request's WSGI environ mapping.
        return obj.environ
+
+
class header_property(_DictAccessorProperty[_TAccessorValue]):
    """Like `environ_property` but for headers."""

    def lookup(self, obj: Request | Response) -> Headers:
        # Both request and response objects expose a ``headers`` mapping.
        return obj.headers
+
+
# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in
# https://www.iana.org/assignments/media-types/media-types.xhtml
# Types listed in the XDG mime info that have a charset in the IANA
# registration; get_content_type appends a charset for these even though
# they don't start with "text/".
_charset_mimetypes = {
    "application/ecmascript",
    "application/javascript",
    "application/sql",
    "application/xml",
    "application/xml-dtd",
    "application/xml-external-parsed-entity",
}
+
+
def get_content_type(mimetype: str, charset: str) -> str:
    """Returns the full content type string with charset for a mimetype.

    If the mimetype represents text, the charset parameter will be
    appended, otherwise the mimetype is returned unchanged.

    :param mimetype: The mimetype to be used as content type.
    :param charset: The charset to be appended for text mimetypes.
    :return: The content type.

    .. versionchanged:: 0.15
        Any type that ends with ``+xml`` gets a charset, not just those
        that start with ``application/``. Known text types such as
        ``application/javascript`` are also given charsets.
    """
    is_textual = (
        mimetype.startswith("text/")
        or mimetype.endswith("+xml")
        or mimetype in _charset_mimetypes
    )

    if not is_textual:
        return mimetype

    return f"{mimetype}; charset={charset}"
+
+
def secure_filename(filename: str) -> str:
    r"""Pass it a filename and it will return a secure version of it. This
    filename can then safely be stored on a regular file system and passed
    to :func:`os.path.join`. The filename returned is an ASCII only string
    for maximum portability.

    On windows systems the function also makes sure that the file is not
    named after one of the special device files.

    >>> secure_filename("My cool movie.mov")
    'My_cool_movie.mov'
    >>> secure_filename("../../../etc/passwd")
    'etc_passwd'
    >>> secure_filename('i contain cool \xfcml\xe4uts.txt')
    'i_contain_cool_umlauts.txt'

    The function might return an empty filename. It's your responsibility
    to ensure that the filename is unique and that you abort or
    generate a random filename if the function returned an empty one.

    .. versionadded:: 0.5

    :param filename: the filename to secure
    """
    # Normalize to ASCII: decompose accented characters, then drop
    # everything that doesn't fit.
    filename = unicodedata.normalize("NFKD", filename)
    filename = filename.encode("ascii", "ignore").decode("ascii")

    # Turn path separators into spaces so directory components collapse
    # into the name instead of allowing traversal.
    for sep in os.sep, os.path.altsep:
        if sep:
            filename = filename.replace(sep, " ")

    # Collapse whitespace runs to "_", strip all remaining unsafe
    # characters, and trim leading/trailing dots and underscores.
    filename = re.sub(r"[^A-Za-z0-9_.-]", "", "_".join(filename.split())).strip("._")

    # On NT a couple of special device files are present in each folder.
    # We have to ensure that the target file is not such a filename, so
    # we prepend an underscore.
    # Bug fix: this previously returned the literal string "_(unknown)"
    # (an f-string that had lost its placeholder), discarding the name.
    if (
        os.name == "nt"
        and filename
        and filename.split(".")[0].upper() in _windows_device_files
    ):
        filename = f"_{filename}"

    return filename
+
+
def redirect(
    location: str, code: int = 302, Response: type[Response] | None = None
) -> Response:
    """Returns a response object (a WSGI application) that, if called,
    redirects the client to the target location. Supported codes are
    301, 302, 303, 305, 307, and 308. 300 is not supported because
    it's not a real redirect and 304 because it's the answer for a
    request with a request with defined If-Modified-Since headers.

    .. versionadded:: 0.6
       The location can now be a unicode string that is encoded using
       the :func:`iri_to_uri` function.

    .. versionadded:: 0.10
        The class used for the Response object can now be passed in.

    :param location: the location the response should redirect to.
    :param code: the redirect status code. defaults to 302.
    :param class Response: a Response class to use when instantiating a
        response. The default is :class:`werkzeug.wrappers.Response` if
        unspecified.
    """
    if Response is None:
        from .wrappers import Response

    # Escape the target so it is safe to embed in the fallback HTML body.
    display = escape(location)
    body = (
        "<!doctype html>\n"
        "<html lang=en>\n"
        "<title>Redirecting...</title>\n"
        "<h1>Redirecting...</h1>\n"
        "<p>You should be redirected automatically to the target URL: "
        f'<a href="{display}">{display}</a>. If not, click the link.\n'
    )
    response = Response(body, code, mimetype="text/html")  # type: ignore[misc]
    response.headers["Location"] = location
    return response
+
+
def append_slash_redirect(environ: WSGIEnvironment, code: int = 308) -> Response:
    """Redirect to the current URL with a slash appended.

    If the current URL is ``/user/42``, the redirect URL will be
    ``42/``. When joined to the current URL during response
    processing or by the browser, this will produce ``/user/42/``.

    The behavior is undefined if the path ends with a slash already. If
    called unconditionally on a URL, it may produce a redirect loop.

    :param environ: Use the path and query from this WSGI environment
        to produce the redirect URL.
    :param code: the status code for the redirect.

    .. versionchanged:: 2.1
        Produce a relative URL that only modifies the last segment.
        Relevant when the current path has multiple segments.

    .. versionchanged:: 2.1
        The default status code is 308 instead of 301. This preserves
        the request method and body.
    """
    # Only the last path segment is needed for a relative redirect.
    _, _, tail = environ["PATH_INFO"].rpartition("/")
    new_path = f"{tail}/" if tail else "./"
    query_string = environ.get("QUERY_STRING")

    if query_string:
        new_path = f"{new_path}?{query_string}"

    return redirect(new_path, code)
+
+
def send_file(
    path_or_file: os.PathLike[str] | str | t.IO[bytes],
    environ: WSGIEnvironment,
    mimetype: str | None = None,
    as_attachment: bool = False,
    download_name: str | None = None,
    conditional: bool = True,
    etag: bool | str = True,
    last_modified: datetime | int | float | None = None,
    max_age: None | (int | t.Callable[[str | None], int | None]) = None,
    use_x_sendfile: bool = False,
    response_class: type[Response] | None = None,
    _root_path: os.PathLike[str] | str | None = None,
) -> Response:
    """Send the contents of a file to the client.

    The first argument can be a file path or a file-like object. Paths
    are preferred in most cases because Werkzeug can manage the file and
    get extra information from the path. Passing a file-like object
    requires that the file is opened in binary mode, and is mostly
    useful when building a file in memory with :class:`io.BytesIO`.

    Never pass file paths provided by a user. The path is assumed to be
    trusted, so a user could craft a path to access a file you didn't
    intend. Use :func:`send_from_directory` to safely serve user-provided paths.

    If the WSGI server sets a ``file_wrapper`` in ``environ``, it is
    used, otherwise Werkzeug's built-in wrapper is used. Alternatively,
    if the HTTP server supports ``X-Sendfile``, ``use_x_sendfile=True``
    will tell the server to send the given path, which is much more
    efficient than reading it in Python.

    :param path_or_file: The path to the file to send, relative to the
        current working directory if a relative path is given.
        Alternatively, a file-like object opened in binary mode. Make
        sure the file pointer is seeked to the start of the data.
    :param environ: The WSGI environ for the current request.
    :param mimetype: The MIME type to send for the file. If not
        provided, it will try to detect it from the file name.
    :param as_attachment: Indicate to a browser that it should offer to
        save the file instead of displaying it.
    :param download_name: The default name browsers will use when saving
        the file. Defaults to the passed file name.
    :param conditional: Enable conditional and range responses based on
        request headers. Requires passing a file path and ``environ``.
    :param etag: Calculate an ETag for the file, which requires passing
        a file path. Can also be a string to use instead.
    :param last_modified: The last modified time to send for the file,
        in seconds. If not provided, it will try to detect it from the
        file path.
    :param max_age: How long the client should cache the file, in
        seconds. If set, ``Cache-Control`` will be ``public``, otherwise
        it will be ``no-cache`` to prefer conditional caching.
    :param use_x_sendfile: Set the ``X-Sendfile`` header to let the
        server to efficiently send the file. Requires support from the
        HTTP server. Requires passing a file path.
    :param response_class: Build the response using this class. Defaults
        to :class:`~werkzeug.wrappers.Response`.
    :param _root_path: Do not use. For internal use only. Use
        :func:`send_from_directory` to safely send files under a path.

    .. versionchanged:: 2.0.2
        ``send_file`` only sets a detected ``Content-Encoding`` if
        ``as_attachment`` is disabled.

    .. versionadded:: 2.0
        Adapted from Flask's implementation.

    .. versionchanged:: 2.0
        ``download_name`` replaces Flask's ``attachment_filename``
        parameter. If ``as_attachment=False``, it is passed with
        ``Content-Disposition: inline`` instead.

    .. versionchanged:: 2.0
        ``max_age`` replaces Flask's ``cache_timeout`` parameter.
        ``conditional`` is enabled and ``max_age`` is not set by
        default.

    .. versionchanged:: 2.0
        ``etag`` replaces Flask's ``add_etags`` parameter. It can be a
        string to use instead of generating one.

    .. versionchanged:: 2.0
        If an encoding is returned when guessing ``mimetype`` from
        ``download_name``, set the ``Content-Encoding`` header.
    """
    if response_class is None:
        from .wrappers import Response

        response_class = Response

    path: str | None = None
    file: t.IO[bytes] | None = None
    size: int | None = None
    mtime: float | None = None
    headers = Headers()

    # Anything fspath-able is treated as a path; everything else is
    # assumed to be a binary file-like object.
    if isinstance(path_or_file, (os.PathLike, str)) or hasattr(
        path_or_file, "__fspath__"
    ):
        path_or_file = t.cast("t.Union[os.PathLike[str], str]", path_or_file)

        # Flask will pass app.root_path, allowing its send_file wrapper
        # to not have to deal with paths.
        if _root_path is not None:
            path = os.path.join(_root_path, path_or_file)
        else:
            path = os.path.abspath(path_or_file)

        stat = os.stat(path)
        size = stat.st_size
        mtime = stat.st_mtime
    else:
        file = path_or_file

    if download_name is None and path is not None:
        download_name = os.path.basename(path)

    if mimetype is None:
        if download_name is None:
            raise TypeError(
                "Unable to detect the MIME type because a file name is"
                " not available. Either set 'download_name', pass a"
                " path instead of a file, or set 'mimetype'."
            )

        mimetype, encoding = mimetypes.guess_type(download_name)

        if mimetype is None:
            mimetype = "application/octet-stream"

        # Don't send encoding for attachments, it causes browsers to
        # save decompress tar.gz files.
        if encoding is not None and not as_attachment:
            headers.set("Content-Encoding", encoding)

    if download_name is not None:
        try:
            # Plain ASCII names can go in the "filename" parameter directly.
            download_name.encode("ascii")
        except UnicodeEncodeError:
            # Non-ASCII: send an ASCII fallback plus an RFC 5987
            # percent-encoded "filename*" parameter.
            simple = unicodedata.normalize("NFKD", download_name)
            simple = simple.encode("ascii", "ignore").decode("ascii")
            # safe = RFC 5987 attr-char
            quoted = quote(download_name, safe="!#$&+-.^_`|~")
            names = {"filename": simple, "filename*": f"UTF-8''{quoted}"}
        else:
            names = {"filename": download_name}

        value = "attachment" if as_attachment else "inline"
        headers.set("Content-Disposition", value, **names)
    elif as_attachment:
        raise TypeError(
            "No name provided for attachment. Either set"
            " 'download_name' or pass a path instead of a file."
        )

    if use_x_sendfile and path is not None:
        # Let the HTTP server send the file; no body data from Python.
        headers["X-Sendfile"] = path
        data = None
    else:
        if file is None:
            file = open(path, "rb")  # type: ignore
        elif isinstance(file, io.BytesIO):
            size = file.getbuffer().nbytes
        elif isinstance(file, io.TextIOBase):
            raise ValueError("Files must be opened in binary mode or use BytesIO.")

        data = wrap_file(environ, file)

    rv = response_class(
        data, mimetype=mimetype, headers=headers, direct_passthrough=True
    )

    if size is not None:
        rv.content_length = size

    if last_modified is not None:
        rv.last_modified = last_modified  # type: ignore
    elif mtime is not None:
        rv.last_modified = mtime  # type: ignore

    # Default to conditional caching until max_age says otherwise.
    rv.cache_control.no_cache = True

    # Flask will pass app.get_send_file_max_age, allowing its send_file
    # wrapper to not have to deal with paths.
    if callable(max_age):
        max_age = max_age(path)

    if max_age is not None:
        if max_age > 0:
            rv.cache_control.no_cache = None
            rv.cache_control.public = True

        rv.cache_control.max_age = max_age
        rv.expires = int(time() + max_age)  # type: ignore

    if isinstance(etag, str):
        rv.set_etag(etag)
    elif etag and path is not None:
        # Cheap content fingerprint: mtime, size, and a checksum of the path.
        check = adler32(path.encode()) & 0xFFFFFFFF
        rv.set_etag(f"{mtime}-{size}-{check}")

    if conditional:
        try:
            rv = rv.make_conditional(environ, accept_ranges=True, complete_length=size)
        except RequestedRangeNotSatisfiable:
            # Close the file we may have opened before propagating.
            if file is not None:
                file.close()

            raise

        # Some x-sendfile implementations incorrectly ignore the 304
        # status code and send the file anyway.
        if rv.status_code == 304:
            rv.headers.pop("x-sendfile", None)

    return rv
+
+
def send_from_directory(
    directory: os.PathLike[str] | str,
    path: os.PathLike[str] | str,
    environ: WSGIEnvironment,
    **kwargs: t.Any,
) -> Response:
    """Send a file from within a directory using :func:`send_file`.

    This is a secure way to serve files from a folder, such as static
    files or uploads. Uses :func:`~werkzeug.security.safe_join` to
    ensure the path coming from the client is not maliciously crafted to
    point outside the specified directory.

    If the final path does not point to an existing regular file,
    returns a 404 :exc:`~werkzeug.exceptions.NotFound` error.

    :param directory: The directory that ``path`` must be located under. This *must not*
        be a value provided by the client, otherwise it becomes insecure.
    :param path: The path to the file to send, relative to ``directory``. This is the
        part of the path provided by the client, which is checked for security.
    :param environ: The WSGI environ for the current request.
    :param kwargs: Arguments to pass to :func:`send_file`.

    .. versionadded:: 2.0
        Adapted from Flask's implementation.
    """
    # safe_join returns None when the client path escapes the directory.
    resolved = safe_join(os.fspath(directory), os.fspath(path))

    if resolved is None:
        raise NotFound()

    # Flask will pass app.root_path, allowing its send_from_directory
    # wrapper to not have to deal with paths.
    if "_root_path" in kwargs:
        resolved = os.path.join(kwargs["_root_path"], resolved)

    if not os.path.isfile(resolved):
        raise NotFound()

    return send_file(resolved, environ, **kwargs)
+
+
def import_string(import_name: str, silent: bool = False) -> t.Any:
    """Imports an object based on a string. This is useful if you want to
    use import paths as endpoints or something similar. An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If `silent` is True the return value will be `None` if the import fails.

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
        `None` is returned instead.
    :return: imported object
    """
    # Normalize the ``module:attr`` form to plain dotted notation.
    dotted = import_name.replace(":", ".")

    try:
        try:
            __import__(dotted)
        except ImportError:
            # A single-component name that fails to import is a hard
            # failure; a dotted name may still resolve as module + attr.
            if "." not in dotted:
                raise
        else:
            # The full dotted path imported cleanly as a module.
            return sys.modules[dotted]

        module_name, _, attr_name = dotted.rpartition(".")
        module = __import__(module_name, globals(), locals(), [attr_name])

        try:
            return getattr(module, attr_name)
        except AttributeError as e:
            # Present a missing attribute as an import failure too.
            raise ImportError(e) from None
    except ImportError as e:
        if silent:
            return None

        raise ImportStringError(dotted, e).with_traceback(
            sys.exc_info()[2]
        ) from None
+
+
def find_modules(
    import_path: str, include_packages: bool = False, recursive: bool = False
) -> t.Iterator[str]:
    """Finds all the modules below a package. This can be useful to
    automatically import all views / controllers so that their metaclasses /
    function decorators have a chance to register themselves on the
    application.

    Packages are not returned unless `include_packages` is `True`. This can
    also recursively list modules but in that case it will import all the
    packages to get the correct load path of that module.

    :param import_path: the dotted name for the package to find child modules.
    :param include_packages: set to `True` if packages should be returned, too.
    :param recursive: set to `True` if recursion should happen.
    :return: generator
    """
    package = import_string(import_path)
    search_locations = getattr(package, "__path__", None)

    # Only packages carry __path__; a plain module cannot be searched.
    if search_locations is None:
        raise ValueError(f"{import_path!r} is not a package")

    prefix = f"{package.__name__}."

    for _importer, name, is_package in pkgutil.iter_modules(search_locations):
        full_name = prefix + name

        if not is_package:
            yield full_name
            continue

        if include_packages:
            yield full_name

        if recursive:
            # Descending imports the subpackage so its own __path__ is
            # correct for the next level of iteration.
            yield from find_modules(full_name, include_packages, True)
+
+
class ImportStringError(ImportError):
    """Provides information about a failed :func:`import_string` attempt."""

    #: String in dotted notation that failed to be imported.
    import_name: str
    #: Wrapped exception.
    exception: BaseException

    def __init__(self, import_name: str, exception: BaseException) -> None:
        self.import_name = import_name
        self.exception = exception
        msg = import_name
        name = ""
        tracked = []
        # Re-import each prefix of the dotted path in turn to locate the
        # deepest component that still resolves; the first failing prefix
        # determines where the detailed diagnostic is generated.
        for part in import_name.replace(":", ".").split("."):
            name = f"{name}.{part}" if name else part
            imported = import_string(name, silent=True)
            if imported:
                # Record the prefix together with its on-disk location
                # (getattr default covers modules without __file__).
                tracked.append((name, getattr(imported, "__file__", None)))
            else:
                # Build a report listing every prefix that imported fine,
                # the one that did not, and the original exception.
                track = [f"- {n!r} found in {i!r}." for n, i in tracked]
                track.append(f"- {name!r} not found.")
                track_str = "\n".join(track)
                msg = (
                    f"import_string() failed for {import_name!r}. Possible reasons"
                    f" are:\n\n"
                    "- missing __init__.py in a package;\n"
                    "- package or module path not included in sys.path;\n"
                    "- duplicated package or module name taking precedence in"
                    " sys.path;\n"
                    "- missing module, class, function or variable;\n\n"
                    f"Debugged import:\n\n{track_str}\n\n"
                    f"Original exception:\n\n{type(exception).__name__}: {exception}"
                )
                break

        super().__init__(msg)

    def __repr__(self) -> str:
        return f"<{type(self).__name__}({self.import_name!r}, {self.exception!r})>"
diff --git a/venv/lib/python3.8/site-packages/werkzeug/wrappers/__init__.py b/venv/lib/python3.8/site-packages/werkzeug/wrappers/__init__.py
new file mode 100644
index 0000000..b36f228
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/wrappers/__init__.py
@@ -0,0 +1,3 @@
+from .request import Request as Request
+from .response import Response as Response
+from .response import ResponseStream as ResponseStream
diff --git a/venv/lib/python3.8/site-packages/werkzeug/wrappers/request.py b/venv/lib/python3.8/site-packages/werkzeug/wrappers/request.py
new file mode 100644
index 0000000..344f28b
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/wrappers/request.py
@@ -0,0 +1,647 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import functools
+import json
+import typing as t
+from io import BytesIO
+
+from .._internal import _wsgi_decoding_dance
+from ..datastructures import CombinedMultiDict
+from ..datastructures import EnvironHeaders
+from ..datastructures import FileStorage
+from ..datastructures import ImmutableMultiDict
+from ..datastructures import iter_multi_items
+from ..datastructures import MultiDict
+from ..exceptions import BadRequest
+from ..exceptions import UnsupportedMediaType
+from ..formparser import default_stream_factory
+from ..formparser import FormDataParser
+from ..sansio.request import Request as _SansIORequest
+from ..utils import cached_property
+from ..utils import environ_property
+from ..wsgi import _get_server
+from ..wsgi import get_input_stream
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+
class Request(_SansIORequest):
    """Represents an incoming WSGI HTTP request, with headers and body
    taken from the WSGI environment. Has properties and methods for
    using the functionality defined by various HTTP specs. The data in
    requests object is read-only.

    Text data is assumed to use UTF-8 encoding, which should be true for
    the vast majority of modern clients. Using an encoding set by the
    client is unsafe in Python due to extra encodings it provides, such
    as ``zip``. To change the assumed encoding, subclass and replace
    :attr:`charset`.

    :param environ: The WSGI environ is generated by the WSGI server and
        contains information about the server configuration and client
        request.
    :param populate_request: Add this request object to the WSGI environ
        as ``environ['werkzeug.request']``. Can be useful when
        debugging.
    :param shallow: Makes reading from :attr:`stream` (and any method
        that would read from it) raise a :exc:`RuntimeError`. Useful to
        prevent consuming the form data in middleware, which would make
        it unavailable to the final application.

    .. versionchanged:: 3.0
        The ``charset``, ``url_charset``, and ``encoding_errors`` parameters
        were removed.

    .. versionchanged:: 2.1
        Old ``BaseRequest`` and mixin classes were removed.

    .. versionchanged:: 2.1
        Remove the ``disable_data_descriptor`` attribute.

    .. versionchanged:: 2.0
        Combine ``BaseRequest`` and mixins into a single ``Request``
        class.

    .. versionchanged:: 0.5
        Read-only mode is enforced with immutable classes for all data.
    """

    #: the maximum content length. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: parsing fails because more than the specified value is transmitted
    #: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: .. versionadded:: 0.5
    max_content_length: int | None = None

    #: the maximum form field size. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: data in memory for post data is longer than the specified value a
    #: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: .. versionadded:: 0.5
    max_form_memory_size: int | None = None

    #: The maximum number of multipart parts to parse, passed to
    #: :attr:`form_data_parser_class`. Parsing form data with more than this
    #: many parts will raise :exc:`~.RequestEntityTooLarge`.
    #:
    #: .. versionadded:: 2.2.3
    max_form_parts: int = 1000

    #: The form data parser that should be used. Can be replaced to customize
    #: the form date parsing.
    form_data_parser_class: type[FormDataParser] = FormDataParser

    #: The WSGI environment containing HTTP headers and information from
    #: the WSGI server.
    environ: WSGIEnvironment

    #: Set when creating the request object. If ``True``, reading from
    #: the request body will cause a ``RuntimeException``. Useful to
    #: prevent modifying the stream from middleware.
    shallow: bool

    def __init__(
        self,
        environ: WSGIEnvironment,
        populate_request: bool = True,
        shallow: bool = False,
    ) -> None:
        super().__init__(
            method=environ.get("REQUEST_METHOD", "GET"),
            scheme=environ.get("wsgi.url_scheme", "http"),
            server=_get_server(environ),
            root_path=_wsgi_decoding_dance(environ.get("SCRIPT_NAME") or ""),
            path=_wsgi_decoding_dance(environ.get("PATH_INFO") or ""),
            # WSGI native strings carry bytes as latin1; keep the query
            # string as raw bytes for later decoding.
            query_string=environ.get("QUERY_STRING", "").encode("latin1"),
            headers=EnvironHeaders(environ),
            remote_addr=environ.get("REMOTE_ADDR"),
        )
        self.environ = environ
        self.shallow = shallow

        if populate_request and not shallow:
            self.environ["werkzeug.request"] = self

    @classmethod
    def from_values(cls, *args: t.Any, **kwargs: t.Any) -> Request:
        """Create a new request object based on the values provided. If
        environ is given missing values are filled from there. This method is
        useful for small scripts when you need to simulate a request from an URL.
        Do not use this method for unittesting, there is a full featured client
        object (:class:`Client`) that allows to create multipart requests,
        support for cookies etc.

        This accepts the same options as the
        :class:`~werkzeug.test.EnvironBuilder`.

        .. versionchanged:: 0.5
            This method now accepts the same arguments as
            :class:`~werkzeug.test.EnvironBuilder`. Because of this the
            `environ` parameter is now called `environ_overrides`.

        :return: request object
        """
        from ..test import EnvironBuilder

        builder = EnvironBuilder(*args, **kwargs)
        try:
            return builder.get_request(cls)
        finally:
            # Always release any files the builder opened, even when
            # get_request raises.
            builder.close()

    @classmethod
    def application(cls, f: t.Callable[[Request], WSGIApplication]) -> WSGIApplication:
        """Decorate a function as responder that accepts the request as
        the last argument. This works like the :func:`responder`
        decorator but the function is passed the request object as the
        last argument and the request object will be closed
        automatically::

            @Request.application
            def my_wsgi_app(request):
                return Response('Hello World!')

        As of Werkzeug 0.14 HTTP exceptions are automatically caught and
        converted to responses instead of failing.

        :param f: the WSGI callable to decorate
        :return: a new WSGI callable
        """
        #: return a callable that wraps the -2nd argument with the request
        #: and calls the function with all the arguments up to that one and
        #: the request. The return value is then called with the latest
        #: two arguments. This makes it possible to use this decorator for
        #: both standalone WSGI functions as well as bound methods and
        #: partially applied functions.
        from ..exceptions import HTTPException

        @functools.wraps(f)
        def application(*args: t.Any) -> cabc.Iterable[bytes]:
            # args[-2] is the WSGI environ, args[-1] is start_response.
            request = cls(args[-2])
            with request:
                try:
                    resp = f(*args[:-2] + (request,))
                except HTTPException as e:
                    resp = t.cast("WSGIApplication", e.get_response(args[-2]))
                return resp(*args[-2:])

        return t.cast("WSGIApplication", application)

    def _get_file_stream(
        self,
        total_content_length: int | None,
        content_type: str | None,
        filename: str | None = None,
        content_length: int | None = None,
    ) -> t.IO[bytes]:
        """Called to get a stream for the file upload.

        This must provide a file-like class with `read()`, `readline()`
        and `seek()` methods that is both writeable and readable.

        The default implementation returns a temporary file if the total
        content length is higher than 500KB. Because many browsers do not
        provide a content length for the files only the total content
        length matters.

        :param total_content_length: the total content length of all the
                                     data in the request combined. This value
                                     is guaranteed to be there.
        :param content_type: the mimetype of the uploaded file.
        :param filename: the filename of the uploaded file. May be `None`.
        :param content_length: the length of this file. This value is usually
                               not provided because webbrowsers do not provide
                               this value.
        """
        return default_stream_factory(
            total_content_length=total_content_length,
            filename=filename,
            content_type=content_type,
            content_length=content_length,
        )

    @property
    def want_form_data_parsed(self) -> bool:
        """``True`` if the request method carries content. By default
        this is true if a ``Content-Type`` is sent.

        .. versionadded:: 0.8
        """
        return bool(self.environ.get("CONTENT_TYPE"))

    def make_form_data_parser(self) -> FormDataParser:
        """Creates the form data parser. Instantiates the
        :attr:`form_data_parser_class` with some parameters.

        .. versionadded:: 0.8
        """
        return self.form_data_parser_class(
            stream_factory=self._get_file_stream,
            max_form_memory_size=self.max_form_memory_size,
            max_content_length=self.max_content_length,
            max_form_parts=self.max_form_parts,
            cls=self.parameter_storage_class,
        )

    def _load_form_data(self) -> None:
        """Method used internally to retrieve submitted data. After calling
        this sets `form` and `files` on the request object to multi dicts
        filled with the incoming form data. As a matter of fact the input
        stream will be empty afterwards. You can also call this method to
        force the parsing of the form data.

        .. versionadded:: 0.8
        """
        # abort early if we have already consumed the stream
        if "form" in self.__dict__:
            return

        if self.want_form_data_parsed:
            parser = self.make_form_data_parser()
            data = parser.parse(
                self._get_stream_for_parsing(),
                self.mimetype,
                self.content_length,
                self.mimetype_params,
            )
        else:
            # No Content-Type: leave the stream untouched and expose
            # empty form/files containers.
            data = (
                self.stream,
                self.parameter_storage_class(),
                self.parameter_storage_class(),
            )

        # inject the values into the instance dict so that we bypass
        # our cached_property non-data descriptor.
        d = self.__dict__
        d["stream"], d["form"], d["files"] = data

    def _get_stream_for_parsing(self) -> t.IO[bytes]:
        """This is the same as accessing :attr:`stream` with the difference
        that if it finds cached data from calling :meth:`get_data` first it
        will create a new stream out of the cached data.

        .. versionadded:: 0.9.3
        """
        cached_data = getattr(self, "_cached_data", None)
        if cached_data is not None:
            return BytesIO(cached_data)
        return self.stream

    def close(self) -> None:
        """Closes associated resources of this request object. This
        closes all file handles explicitly. You can also use the request
        object in a with statement which will automatically close it.

        .. versionadded:: 0.9
        """
        # Only uploads that were actually parsed (present in __dict__)
        # hold open file handles that need closing.
        files = self.__dict__.get("files")
        for _key, value in iter_multi_items(files or ()):
            value.close()

    def __enter__(self) -> Request:
        return self

    def __exit__(self, exc_type, exc_value, tb) -> None:  # type: ignore
        self.close()

    @cached_property
    def stream(self) -> t.IO[bytes]:
        """The WSGI input stream, with safety checks. This stream can only be consumed
        once.

        Use :meth:`get_data` to get the full data as bytes or text. The :attr:`data`
        attribute will contain the full bytes only if they do not represent form data.
        The :attr:`form` attribute will contain the parsed form data in that case.

        Unlike :attr:`input_stream`, this stream guards against infinite streams or
        reading past :attr:`content_length` or :attr:`max_content_length`.

        If ``max_content_length`` is set, it can be enforced on streams if
        ``wsgi.input_terminated`` is set. Otherwise, an empty stream is returned.

        If the limit is reached before the underlying stream is exhausted (such as a
        file that is too large, or an infinite stream), the remaining contents of the
        stream cannot be read safely. Depending on how the server handles this, clients
        may show a "connection reset" failure instead of seeing the 413 response.

        .. versionchanged:: 2.3
            Check ``max_content_length`` preemptively and while reading.

        .. versionchanged:: 0.9
            The stream is always set (but may be consumed) even if form parsing was
            accessed first.
        """
        if self.shallow:
            raise RuntimeError(
                "This request was created with 'shallow=True', reading"
                " from the input stream is disabled."
            )

        return get_input_stream(
            self.environ, max_content_length=self.max_content_length
        )

    input_stream = environ_property[t.IO[bytes]](
        "wsgi.input",
        doc="""The raw WSGI input stream, without any safety checks.

        This is dangerous to use. It does not guard against infinite streams or reading
        past :attr:`content_length` or :attr:`max_content_length`.

        Use :attr:`stream` instead.
        """,
    )

    @cached_property
    def data(self) -> bytes:
        """The raw data read from :attr:`stream`. Will be empty if the request
        represents form data.

        To get the raw data even if it represents form data, use :meth:`get_data`.
        """
        return self.get_data(parse_form_data=True)

    @t.overload
    def get_data(
        self,
        cache: bool = True,
        as_text: t.Literal[False] = False,
        parse_form_data: bool = False,
    ) -> bytes: ...

    @t.overload
    def get_data(
        self,
        cache: bool = True,
        as_text: t.Literal[True] = ...,
        parse_form_data: bool = False,
    ) -> str: ...

    def get_data(
        self, cache: bool = True, as_text: bool = False, parse_form_data: bool = False
    ) -> bytes | str:
        """This reads the buffered incoming data from the client into one
        bytes object. By default this is cached but that behavior can be
        changed by setting `cache` to `False`.

        Usually it's a bad idea to call this method without checking the
        content length first as a client could send dozens of megabytes or more
        to cause memory problems on the server.

        Note that if the form data was already parsed this method will not
        return anything as form data parsing does not cache the data like
        this method does. To implicitly invoke form data parsing function
        set `parse_form_data` to `True`. When this is done the return value
        of this method will be an empty string if the form parser handles
        the data. This generally is not necessary as if the whole data is
        cached (which is the default) the form parser will used the cached
        data to parse the form data. Please be generally aware of checking
        the content length first in any case before calling this method
        to avoid exhausting server memory.

        If `as_text` is set to `True` the return value will be a decoded
        string.

        .. versionadded:: 0.9
        """
        rv = getattr(self, "_cached_data", None)
        if rv is None:
            if parse_form_data:
                self._load_form_data()
            rv = self.stream.read()
            if cache:
                self._cached_data = rv
        if as_text:
            # errors="replace" so undecodable bytes never raise here.
            rv = rv.decode(errors="replace")
        return rv

    @cached_property
    def form(self) -> ImmutableMultiDict[str, str]:
        """The form parameters. By default an
        :class:`~werkzeug.datastructures.ImmutableMultiDict`
        is returned from this function. This can be changed by setting
        :attr:`parameter_storage_class` to a different type. This might
        be necessary if the order of the form data is important.

        Please keep in mind that file uploads will not end up here, but instead
        in the :attr:`files` attribute.

        .. versionchanged:: 0.9

            Previous to Werkzeug 0.9 this would only contain form data for POST
            and PUT requests.
        """
        self._load_form_data()
        # _load_form_data injected "form" into __dict__, which shadows
        # this cached_property, so this returns the parsed value.
        return self.form

    @cached_property
    def values(self) -> CombinedMultiDict[str, str]:
        """A :class:`werkzeug.datastructures.CombinedMultiDict` that
        combines :attr:`args` and :attr:`form`.

        For GET requests, only ``args`` are present, not ``form``.

        .. versionchanged:: 2.0
            For GET requests, only ``args`` are present, not ``form``.
        """
        sources = [self.args]

        if self.method != "GET":
            # GET requests can have a body, and some caching proxies
            # might not treat that differently than a normal GET
            # request, allowing form data to "invisibly" affect the
            # cache without indication in the query string / URL.
            sources.append(self.form)

        args = []

        for d in sources:
            if not isinstance(d, MultiDict):
                d = MultiDict(d)

            args.append(d)

        return CombinedMultiDict(args)

    @cached_property
    def files(self) -> ImmutableMultiDict[str, FileStorage]:
        """:class:`~werkzeug.datastructures.MultiDict` object containing
        all uploaded files. Each key in :attr:`files` is the name from the
        ``<input type="file" name="">``. Each value in :attr:`files` is a
        Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.

        It basically behaves like a standard file object you know from Python,
        with the difference that it also has a
        :meth:`~werkzeug.datastructures.FileStorage.save` function that can
        store the file on the filesystem.

        Note that :attr:`files` will only contain data if the request method was
        POST, PUT or PATCH and the ``<form>`` that posted to the request had
        ``enctype="multipart/form-data"``. It will be empty otherwise.

        See the :class:`~werkzeug.datastructures.MultiDict` /
        :class:`~werkzeug.datastructures.FileStorage` documentation for
        more details about the used data structure.
        """
        self._load_form_data()
        # As with "form", _load_form_data placed "files" in __dict__.
        return self.files

    @property
    def script_root(self) -> str:
        """Alias for :attr:`self.root_path`. ``environ["SCRIPT_ROOT"]``
        without a trailing slash.
        """
        return self.root_path

    @cached_property
    def url_root(self) -> str:
        """Alias for :attr:`root_url`. The URL with scheme, host, and
        root path. For example, ``https://example.com/app/``.
        """
        return self.root_url

    remote_user = environ_property[str](
        "REMOTE_USER",
        doc="""If the server supports user authentication, and the
        script is protected, this attribute contains the username the
        user has authenticated as.""",
    )
    is_multithread = environ_property[bool](
        "wsgi.multithread",
        doc="""boolean that is `True` if the application is served by a
        multithreaded WSGI server.""",
    )
    is_multiprocess = environ_property[bool](
        "wsgi.multiprocess",
        doc="""boolean that is `True` if the application is served by a
        WSGI server that spawns multiple processes.""",
    )
    is_run_once = environ_property[bool](
        "wsgi.run_once",
        doc="""boolean that is `True` if the application will be
        executed only once in a process lifetime. This is the case for
        CGI for example, but it's not guaranteed that the execution only
        happens one time.""",
    )

    # JSON

    #: A module or other object that has ``dumps`` and ``loads``
    #: functions that match the API of the built-in :mod:`json` module.
    json_module = json

    @property
    def json(self) -> t.Any | None:
        """The parsed JSON data if :attr:`mimetype` indicates JSON
        (:mimetype:`application/json`, see :attr:`is_json`).

        Calls :meth:`get_json` with default arguments.

        If the request content type is not ``application/json``, this
        will raise a 415 Unsupported Media Type error.

        .. versionchanged:: 2.3
            Raise a 415 error instead of 400.

        .. versionchanged:: 2.1
            Raise a 400 error if the content type is incorrect.
        """
        return self.get_json()

    # Cached values for ``(silent=False, silent=True)``. Initialized
    # with sentinel values.
    _cached_json: tuple[t.Any, t.Any] = (Ellipsis, Ellipsis)

    @t.overload
    def get_json(
        self, force: bool = ..., silent: t.Literal[False] = ..., cache: bool = ...
    ) -> t.Any: ...

    @t.overload
    def get_json(
        self, force: bool = ..., silent: bool = ..., cache: bool = ...
    ) -> t.Any | None: ...

    def get_json(
        self, force: bool = False, silent: bool = False, cache: bool = True
    ) -> t.Any | None:
        """Parse :attr:`data` as JSON.

        If the mimetype does not indicate JSON
        (:mimetype:`application/json`, see :attr:`is_json`), or parsing
        fails, :meth:`on_json_loading_failed` is called and
        its return value is used as the return value. By default this
        raises a 415 Unsupported Media Type resp.

        :param force: Ignore the mimetype and always try to parse JSON.
        :param silent: Silence mimetype and parsing errors, and
            return ``None`` instead.
        :param cache: Store the parsed JSON to return for subsequent
            calls.

        .. versionchanged:: 2.3
            Raise a 415 error instead of 400.

        .. versionchanged:: 2.1
            Raise a 400 error if the content type is incorrect.
        """
        # The cache tuple is indexed by ``silent`` (False -> 0, True -> 1).
        if cache and self._cached_json[silent] is not Ellipsis:
            return self._cached_json[silent]

        if not (force or self.is_json):
            if not silent:
                return self.on_json_loading_failed(None)
            else:
                return None

        data = self.get_data(cache=cache)

        try:
            rv = self.json_module.loads(data)
        except ValueError as e:
            if silent:
                rv = None

                if cache:
                    # Only the silent slot gets the None result; the
                    # non-silent slot keeps whatever it held before.
                    normal_rv, _ = self._cached_json
                    self._cached_json = (normal_rv, rv)
            else:
                rv = self.on_json_loading_failed(e)

                if cache:
                    # Only the non-silent slot is updated here.
                    _, silent_rv = self._cached_json
                    self._cached_json = (rv, silent_rv)
        else:
            if cache:
                # Successful parse is valid for both silent modes.
                self._cached_json = (rv, rv)

        return rv

    def on_json_loading_failed(self, e: ValueError | None) -> t.Any:
        """Called if :meth:`get_json` fails and isn't silenced.

        If this method returns a value, it is used as the return value
        for :meth:`get_json`. The default implementation raises
        :exc:`~werkzeug.exceptions.BadRequest`.

        :param e: If parsing failed, this is the exception. It will be
            ``None`` if the content type wasn't ``application/json``.

        .. versionchanged:: 2.3
            Raise a 415 error instead of 400.
        """
        if e is not None:
            raise BadRequest(f"Failed to decode JSON object: {e}")

        raise UnsupportedMediaType(
            "Did not attempt to load JSON data because the request"
            " Content-Type was not 'application/json'."
        )
diff --git a/venv/lib/python3.8/site-packages/werkzeug/wrappers/response.py b/venv/lib/python3.8/site-packages/werkzeug/wrappers/response.py
new file mode 100644
index 0000000..7f01287
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/wrappers/response.py
@@ -0,0 +1,831 @@
+from __future__ import annotations
+
+import json
+import typing as t
+from http import HTTPStatus
+from urllib.parse import urljoin
+
+from .._internal import _get_environ
+from ..datastructures import Headers
+from ..http import generate_etag
+from ..http import http_date
+from ..http import is_resource_modified
+from ..http import parse_etags
+from ..http import parse_range_header
+from ..http import remove_entity_headers
+from ..sansio.response import Response as _SansIOResponse
+from ..urls import iri_to_uri
+from ..utils import cached_property
+from ..wsgi import _RangeWrapper
+from ..wsgi import ClosingIterator
+from ..wsgi import get_current_url
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+ from .request import Request
+
+
+def _iter_encoded(iterable: t.Iterable[str | bytes]) -> t.Iterator[bytes]:
+ for item in iterable:
+ if isinstance(item, str):
+ yield item.encode()
+ else:
+ yield item
+
+
class Response(_SansIOResponse):
    """Represents an outgoing WSGI HTTP response with body, status, and
    headers. Has properties and methods for using the functionality
    defined by various HTTP specs.

    The response body is flexible to support different use cases. The
    simple form is passing bytes, or a string which will be encoded as
    UTF-8. Passing an iterable of bytes or strings makes this a
    streaming response. A generator is particularly useful for building
    a CSV file in memory or using SSE (Server Sent Events). A file-like
    object is also iterable, although the
    :func:`~werkzeug.utils.send_file` helper should be used in that
    case.

    The response object is itself a WSGI application callable. When
    called (:meth:`__call__`) with ``environ`` and ``start_response``,
    it will pass its status and headers to ``start_response`` then
    return its body as an iterable.

    .. code-block:: python

        from werkzeug.wrappers.response import Response

        def index():
            return Response("Hello, World!")

        def application(environ, start_response):
            path = environ.get("PATH_INFO") or "/"

            if path == "/":
                response = index()
            else:
                response = Response("Not Found", status=404)

            return response(environ, start_response)

    :param response: The data for the body of the response. A string or
        bytes, or tuple or list of strings or bytes, for a fixed-length
        response, or any other iterable of strings or bytes for a
        streaming response. Defaults to an empty body.
    :param status: The status code for the response. Either an int, in
        which case the default status message is added, or a string in
        the form ``{code} {message}``, like ``404 Not Found``. Defaults
        to 200.
    :param headers: A :class:`~werkzeug.datastructures.Headers` object,
        or a list of ``(key, value)`` tuples that will be converted to a
        ``Headers`` object.
    :param mimetype: The mime type (content type without charset or
        other parameters) of the response. If the value starts with
        ``text/`` (or matches some other special cases), the charset
        will be added to create the ``content_type``.
    :param content_type: The full content type of the response.
        Overrides building the value from ``mimetype``.
    :param direct_passthrough: Pass the response body directly through
        as the WSGI iterable. This can be used when the body is a binary
        file or other iterator of bytes, to skip some unnecessary
        checks. Use :func:`~werkzeug.utils.send_file` instead of setting
        this manually.

    .. versionchanged:: 2.1
        Old ``BaseResponse`` and mixin classes were removed.

    .. versionchanged:: 2.0
        Combine ``BaseResponse`` and mixins into a single ``Response``
        class.

    .. versionchanged:: 0.5
        The ``direct_passthrough`` parameter was added.
    """

    #: if set to `False` accessing properties on the response object will
    #: not try to consume the response iterator and convert it into a list.
    #:
    #: .. versionadded:: 0.6.2
    #:
    #: That attribute was previously called `implicit_seqence_conversion`.
    #: (Notice the typo). If you did use this feature, you have to adapt
    #: your code to the name change.
    implicit_sequence_conversion = True

    #: If a redirect ``Location`` header is a relative URL, make it an
    #: absolute URL, including scheme and domain.
    #:
    #: .. versionchanged:: 2.1
    #:     This is disabled by default, so responses will send relative
    #:     redirects.
    #:
    #: .. versionadded:: 0.8
    autocorrect_location_header = False

    #: Should this response object automatically set the content-length
    #: header if possible? This is true by default.
    #:
    #: .. versionadded:: 0.8
    automatically_set_content_length = True

    #: The response body to send as the WSGI iterable. A list of strings
    #: or bytes represents a fixed-length response, any other iterable
    #: is a streaming response. Strings are encoded to bytes as UTF-8.
    #:
    #: Do not set to a plain string or bytes, that will cause sending
    #: the response to be very inefficient as it will iterate one byte
    #: at a time.
    response: t.Iterable[str] | t.Iterable[bytes]

    def __init__(
        self,
        response: t.Iterable[bytes] | bytes | t.Iterable[str] | str | None = None,
        status: int | str | HTTPStatus | None = None,
        headers: t.Mapping[str, str | t.Iterable[str]]
        | t.Iterable[tuple[str, str]]
        | None = None,
        mimetype: str | None = None,
        content_type: str | None = None,
        direct_passthrough: bool = False,
    ) -> None:
        super().__init__(
            status=status,
            headers=headers,
            mimetype=mimetype,
            content_type=content_type,
        )

        #: Pass the response body directly through as the WSGI iterable.
        #: This can be used when the body is a binary file or other
        #: iterator of bytes, to skip some unnecessary checks. Use
        #: :func:`~werkzeug.utils.send_file` instead of setting this
        #: manually.
        self.direct_passthrough = direct_passthrough
        self._on_close: list[t.Callable[[], t.Any]] = []

        # we set the response after the headers so that if a class changes
        # the charset attribute, the data is set in the correct charset.
        if response is None:
            self.response = []
        elif isinstance(response, (str, bytes, bytearray)):
            self.set_data(response)
        else:
            self.response = response

    def call_on_close(self, func: t.Callable[[], t.Any]) -> t.Callable[[], t.Any]:
        """Adds a function to the internal list of functions that should
        be called as part of closing down the response. Since 0.7 this
        function also returns the function that was passed so that this
        can be used as a decorator.

        .. versionadded:: 0.6
        """
        self._on_close.append(func)
        return func

    def __repr__(self) -> str:
        if self.is_sequence:
            body_info = f"{sum(map(len, self.iter_encoded()))} bytes"
        else:
            body_info = "streamed" if self.is_streamed else "likely-streamed"
        return f"<{type(self).__name__} {body_info} [{self.status}]>"

    @classmethod
    def force_type(
        cls, response: Response, environ: WSGIEnvironment | None = None
    ) -> Response:
        """Enforce that the WSGI response is a response object of the current
        type. Werkzeug will use the :class:`Response` internally in many
        situations like the exceptions. If you call :meth:`get_response` on an
        exception you will get back a regular :class:`Response` object, even
        if you are using a custom subclass.

        This method can enforce a given response type, and it will also
        convert arbitrary WSGI callables into response objects if an environ
        is provided::

            # convert a Werkzeug response object into an instance of the
            # MyResponseClass subclass.
            response = MyResponseClass.force_type(response)

            # convert any WSGI application into a response object
            response = MyResponseClass.force_type(response, environ)

        This is especially useful if you want to post-process responses in
        the main dispatcher and use functionality provided by your subclass.

        Keep in mind that this will modify response objects in place if
        possible!

        :param response: a response object or wsgi application.
        :param environ: a WSGI environment object.
        :return: a response object.
        """
        if not isinstance(response, Response):
            if environ is None:
                raise TypeError(
                    "cannot convert WSGI application into response"
                    " objects without an environ"
                )

            from ..test import run_wsgi_app

            response = Response(*run_wsgi_app(response, environ))

        # Reassigning __class__ converts the instance in place, so any
        # state already set on the response is preserved.
        response.__class__ = cls
        return response

    @classmethod
    def from_app(
        cls, app: WSGIApplication, environ: WSGIEnvironment, buffered: bool = False
    ) -> Response:
        """Create a new response object from an application output. This
        works best if you pass it an application that returns a generator all
        the time. Sometimes applications may use the `write()` callable
        returned by the `start_response` function. This tries to resolve such
        edge cases automatically. But if you don't get the expected output
        you should set `buffered` to `True` which enforces buffering.

        :param app: the WSGI application to execute.
        :param environ: the WSGI environment to execute against.
        :param buffered: set to `True` to enforce buffering.
        :return: a response object.
        """
        from ..test import run_wsgi_app

        return cls(*run_wsgi_app(app, environ, buffered))

    @t.overload
    def get_data(self, as_text: t.Literal[False] = False) -> bytes: ...

    @t.overload
    def get_data(self, as_text: t.Literal[True]) -> str: ...

    def get_data(self, as_text: bool = False) -> bytes | str:
        """The string representation of the response body. Whenever you call
        this property the response iterable is encoded and flattened. This
        can lead to unwanted behavior if you stream big data.

        This behavior can be disabled by setting
        :attr:`implicit_sequence_conversion` to `False`.

        If `as_text` is set to `True` the return value will be a decoded
        string.

        .. versionadded:: 0.9
        """
        self._ensure_sequence()
        rv = b"".join(self.iter_encoded())

        if as_text:
            return rv.decode()

        return rv

    def set_data(self, value: bytes | str) -> None:
        """Sets a new string as response. The value must be a string or
        bytes. If a string is set it's encoded to the charset of the
        response (utf-8 by default).

        .. versionadded:: 0.9
        """
        if isinstance(value, str):
            value = value.encode()
        self.response = [value]
        if self.automatically_set_content_length:
            self.headers["Content-Length"] = str(len(value))

    data = property(
        get_data,
        set_data,
        doc="A descriptor that calls :meth:`get_data` and :meth:`set_data`.",
    )

    def calculate_content_length(self) -> int | None:
        """Returns the content length if available or `None` otherwise."""
        try:
            self._ensure_sequence()
        except RuntimeError:
            # Streaming/passthrough body that may not be buffered; the
            # length cannot be known without consuming it.
            return None
        return sum(len(x) for x in self.iter_encoded())

    def _ensure_sequence(self, mutable: bool = False) -> None:
        """This method can be called by methods that need a sequence. If
        `mutable` is true, it will also ensure that the response sequence
        is a standard Python list.

        .. versionadded:: 0.6
        """
        if self.is_sequence:
            # if we need a mutable object, we ensure it's a list.
            if mutable and not isinstance(self.response, list):
                self.response = list(self.response)  # type: ignore
            return
        if self.direct_passthrough:
            raise RuntimeError(
                "Attempted implicit sequence conversion but the"
                " response object is in direct passthrough mode."
            )
        if not self.implicit_sequence_conversion:
            raise RuntimeError(
                "The response object required the iterable to be a"
                " sequence, but the implicit conversion was disabled."
                " Call make_sequence() yourself."
            )
        self.make_sequence()

    def make_sequence(self) -> None:
        """Converts the response iterator in a list. By default this happens
        automatically if required. If `implicit_sequence_conversion` is
        disabled, this method is not automatically called and some properties
        might raise exceptions. This also encodes all the items.

        .. versionadded:: 0.6
        """
        if not self.is_sequence:
            # if we consume an iterable we have to ensure that the close
            # method of the iterable is called if available when we tear
            # down the response
            close = getattr(self.response, "close", None)
            self.response = list(self.iter_encoded())
            if close is not None:
                self.call_on_close(close)

    def iter_encoded(self) -> t.Iterator[bytes]:
        """Iter the response encoded with the encoding of the response.
        If the response object is invoked as WSGI application the return
        value of this method is used as application iterator unless
        :attr:`direct_passthrough` was activated.
        """
        # Encode in a separate function so that self.response is fetched
        # early. This allows us to wrap the response with the return
        # value from get_app_iter or iter_encoded.
        return _iter_encoded(self.response)

    @property
    def is_streamed(self) -> bool:
        """If the response is streamed (the response is not an iterable with
        a length information) this property is `True`. In this case streamed
        means that there is no information about the number of iterations.
        This is usually `True` if a generator is passed to the response object.

        This is useful for checking before applying some sort of post
        filtering that should not take place for streamed responses.
        """
        try:
            len(self.response)  # type: ignore
        except (TypeError, AttributeError):
            return True
        return False

    @property
    def is_sequence(self) -> bool:
        """If the iterator is buffered, this property will be `True`. A
        response object will consider an iterator to be buffered if the
        response attribute is a list or tuple.

        .. versionadded:: 0.6
        """
        return isinstance(self.response, (tuple, list))

    def close(self) -> None:
        """Close the wrapped response if possible. You can also use the object
        in a with statement which will automatically close it.

        .. versionadded:: 0.9
            Can now be used in a with statement.
        """
        if hasattr(self.response, "close"):
            self.response.close()
        for func in self._on_close:
            func()

    def __enter__(self) -> Response:
        return self

    def __exit__(self, exc_type, exc_value, tb):  # type: ignore
        self.close()

    def freeze(self) -> None:
        """Make the response object ready to be pickled. Does the
        following:

        * Buffer the response into a list, ignoring
          :attr:`implicit_sequence_conversion` and
          :attr:`direct_passthrough`.
        * Set the ``Content-Length`` header.
        * Generate an ``ETag`` header if one is not already set.

        .. versionchanged:: 2.1
            Removed the ``no_etag`` parameter.

        .. versionchanged:: 2.0
            An ``ETag`` header is always added.

        .. versionchanged:: 0.6
            The ``Content-Length`` header is set.
        """
        # Always freeze the encoded response body, ignore
        # implicit_sequence_conversion and direct_passthrough.
        self.response = list(self.iter_encoded())
        self.headers["Content-Length"] = str(sum(map(len, self.response)))
        self.add_etag()

    def get_wsgi_headers(self, environ: WSGIEnvironment) -> Headers:
        """This is automatically called right before the response is started
        and returns headers modified for the given environment. It returns a
        copy of the headers from the response with some modifications applied
        if necessary.

        For example the location header (if present) is joined with the root
        URL of the environment. Also the content length is automatically set
        to zero here for certain status codes.

        .. versionchanged:: 0.6
            Previously that function was called `fix_headers` and modified
            the response object in place. Also since 0.6, IRIs in location
            and content-location headers are handled properly.

            Also starting with 0.6, Werkzeug will attempt to set the content
            length if it is able to figure it out on its own. This is the
            case if all the strings in the response iterable are already
            encoded and the iterable is buffered.

        :param environ: the WSGI environment of the request.
        :return: returns a new :class:`~werkzeug.datastructures.Headers`
                 object.
        """
        headers = Headers(self.headers)
        location: str | None = None
        content_location: str | None = None
        content_length: str | int | None = None
        status = self.status_code

        # iterate over the headers to find all values in one go. Because
        # get_wsgi_headers is called for each response, this gives us a
        # tiny speedup over separate header lookups.
        for key, value in headers:
            ikey = key.lower()
            if ikey == "location":
                location = value
            elif ikey == "content-location":
                content_location = value
            elif ikey == "content-length":
                content_length = value

        if location is not None:
            location = iri_to_uri(location)

            if self.autocorrect_location_header:
                # Make the location header an absolute URL.
                current_url = get_current_url(environ, strip_querystring=True)
                current_url = iri_to_uri(current_url)
                location = urljoin(current_url, location)

            headers["Location"] = location

        # make sure the content location is a URL
        if content_location is not None:
            headers["Content-Location"] = iri_to_uri(content_location)

        if 100 <= status < 200 or status == 204:
            # Per section 3.3.2 of RFC 7230, "a server MUST NOT send a
            # Content-Length header field in any response with a status
            # code of 1xx (Informational) or 204 (No Content)."
            headers.remove("Content-Length")
        elif status == 304:
            remove_entity_headers(headers)

        # if we can determine the content length automatically, we
        # should try to do that. But only if this does not involve
        # flattening the iterator or encoding of strings in the
        # response. We however should not do that if we have a 304
        # response.
        if (
            self.automatically_set_content_length
            and self.is_sequence
            and content_length is None
            and status not in (204, 304)
            and not (100 <= status < 200)
        ):
            content_length = sum(len(x) for x in self.iter_encoded())
            headers["Content-Length"] = str(content_length)

        return headers

    def get_app_iter(self, environ: WSGIEnvironment) -> t.Iterable[bytes]:
        """Returns the application iterator for the given environ. Depending
        on the request method and the current status code the return value
        might be an empty response rather than the one from the response.

        If the request method is `HEAD` or the status code is in a range
        where the HTTP specification requires an empty response, an empty
        iterable is returned.

        .. versionadded:: 0.6

        :param environ: the WSGI environment of the request.
        :return: a response iterable.
        """
        status = self.status_code
        if (
            environ["REQUEST_METHOD"] == "HEAD"
            or 100 <= status < 200
            or status in (204, 304)
        ):
            iterable: t.Iterable[bytes] = ()
        elif self.direct_passthrough:
            # NOTE: the raw body is returned without a ClosingIterator
            # wrapper, so close() is not called automatically here.
            return self.response  # type: ignore
        else:
            iterable = self.iter_encoded()
        return ClosingIterator(iterable, self.close)

    def get_wsgi_response(
        self, environ: WSGIEnvironment
    ) -> tuple[t.Iterable[bytes], str, list[tuple[str, str]]]:
        """Returns the final WSGI response as tuple. The first item in
        the tuple is the application iterator, the second the status and
        the third the list of headers. The response returned is created
        specially for the given environment. For example if the request
        method in the WSGI environment is ``'HEAD'`` the response will
        be empty and only the headers and status code will be present.

        .. versionadded:: 0.6

        :param environ: the WSGI environment of the request.
        :return: an ``(app_iter, status, headers)`` tuple.
        """
        headers = self.get_wsgi_headers(environ)
        app_iter = self.get_app_iter(environ)
        return app_iter, self.status, headers.to_wsgi_list()

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> t.Iterable[bytes]:
        """Process this response as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        :return: an application iterator
        """
        app_iter, status, headers = self.get_wsgi_response(environ)
        start_response(status, headers)
        return app_iter

    # JSON

    #: A module or other object that has ``dumps`` and ``loads``
    #: functions that match the API of the built-in :mod:`json` module.
    json_module = json

    @property
    def json(self) -> t.Any | None:
        """The parsed JSON data if :attr:`mimetype` indicates JSON
        (:mimetype:`application/json`, see :attr:`is_json`).

        Calls :meth:`get_json` with default arguments.
        """
        return self.get_json()

    @t.overload
    def get_json(self, force: bool = ..., silent: t.Literal[False] = ...) -> t.Any: ...

    @t.overload
    def get_json(self, force: bool = ..., silent: bool = ...) -> t.Any | None: ...

    def get_json(self, force: bool = False, silent: bool = False) -> t.Any | None:
        """Parse :attr:`data` as JSON. Useful during testing.

        If the mimetype does not indicate JSON
        (:mimetype:`application/json`, see :attr:`is_json`), this
        returns ``None``.

        Unlike :meth:`Request.get_json`, the result is not cached.

        :param force: Ignore the mimetype and always try to parse JSON.
        :param silent: Silence parsing errors and return ``None``
            instead.
        """
        if not (force or self.is_json):
            return None

        data = self.get_data()

        try:
            return self.json_module.loads(data)
        except ValueError:
            if not silent:
                raise

        return None

    # Stream

    @cached_property
    def stream(self) -> ResponseStream:
        """The response iterable as write-only stream."""
        return ResponseStream(self)

    def _wrap_range_response(self, start: int, length: int) -> None:
        """Wrap existing Response in case of Range Request context."""
        if self.status_code == 206:
            self.response = _RangeWrapper(self.response, start, length)  # type: ignore

    def _is_range_request_processable(self, environ: WSGIEnvironment) -> bool:
        """Return ``True`` if `Range` header is present and if underlying
        resource is considered unchanged when compared with `If-Range` header.
        """
        return (
            "HTTP_IF_RANGE" not in environ
            or not is_resource_modified(
                environ,
                self.headers.get("etag"),
                None,
                self.headers.get("last-modified"),
                ignore_if_range=False,
            )
        ) and "HTTP_RANGE" in environ

    def _process_range_request(
        self,
        environ: WSGIEnvironment,
        complete_length: int | None,
        accept_ranges: bool | str,
    ) -> bool:
        """Handle Range Request related headers (RFC7233). If `Accept-Ranges`
        header is valid, and Range Request is processable, we set the headers
        as described by the RFC, and wrap the underlying response in a
        RangeWrapper.

        Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise.

        :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
                 if `Range` header could not be parsed or satisfied.

        .. versionchanged:: 2.0
            Returns ``False`` if the length is 0.
        """
        from ..exceptions import RequestedRangeNotSatisfiable

        if (
            not accept_ranges
            or complete_length is None
            or complete_length == 0
            or not self._is_range_request_processable(environ)
        ):
            return False

        if accept_ranges is True:
            accept_ranges = "bytes"

        parsed_range = parse_range_header(environ.get("HTTP_RANGE"))

        if parsed_range is None:
            raise RequestedRangeNotSatisfiable(complete_length)

        range_tuple = parsed_range.range_for_length(complete_length)
        content_range_header = parsed_range.to_content_range_header(complete_length)

        if range_tuple is None or content_range_header is None:
            raise RequestedRangeNotSatisfiable(complete_length)

        content_length = range_tuple[1] - range_tuple[0]
        self.headers["Content-Length"] = str(content_length)
        self.headers["Accept-Ranges"] = accept_ranges
        self.content_range = content_range_header  # type: ignore
        self.status_code = 206
        self._wrap_range_response(range_tuple[0], content_length)
        return True

    def make_conditional(
        self,
        request_or_environ: WSGIEnvironment | Request,
        accept_ranges: bool | str = False,
        complete_length: int | None = None,
    ) -> Response:
        """Make the response conditional to the request. This method works
        best if an etag was defined for the response already. The `add_etag`
        method can be used to do that. If called without etag just the date
        header is set.

        This does nothing if the request method in the request or environ is
        anything but GET or HEAD.

        For optimal performance when handling range requests, it's recommended
        that your response data object implements `seekable`, `seek` and `tell`
        methods as described by :py:class:`io.IOBase`. Objects returned by
        :meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.

        It does not remove the body of the response because that's something
        the :meth:`__call__` function does for us automatically.

        Returns self so that you can do ``return resp.make_conditional(req)``
        but modifies the object in-place.

        :param request_or_environ: a request object or WSGI environment to be
                                   used to make the response conditional
                                   against.
        :param accept_ranges: This parameter dictates the value of
                              `Accept-Ranges` header. If ``False`` (default),
                              the header is not set. If ``True``, it will be set
                              to ``"bytes"``. If it's a string, it will use this
                              value.
        :param complete_length: Will be used only in valid Range Requests.
                                It will set `Content-Range` complete length
                                value and compute `Content-Length` real value.
                                This parameter is mandatory for successful
                                Range Requests completion.
        :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
                 if `Range` header could not be parsed or satisfied.

        .. versionchanged:: 2.0
            Range processing is skipped if length is 0 instead of
            raising a 416 Range Not Satisfiable error.
        """
        environ = _get_environ(request_or_environ)
        if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
            # if the date is not in the headers, add it now. We however
            # will not override an already existing header. Unfortunately
            # this header will be overridden by many WSGI servers including
            # wsgiref.
            if "date" not in self.headers:
                self.headers["Date"] = http_date()
            is206 = self._process_range_request(environ, complete_length, accept_ranges)
            if not is206 and not is_resource_modified(
                environ,
                self.headers.get("etag"),
                None,
                self.headers.get("last-modified"),
            ):
                # 412 Precondition Failed if an If-Match header was sent,
                # otherwise 304 Not Modified.
                if parse_etags(environ.get("HTTP_IF_MATCH")):
                    self.status_code = 412
                else:
                    self.status_code = 304
            if (
                self.automatically_set_content_length
                and "content-length" not in self.headers
            ):
                length = self.calculate_content_length()
                if length is not None:
                    self.headers["Content-Length"] = str(length)
        return self

    def add_etag(self, overwrite: bool = False, weak: bool = False) -> None:
        """Add an etag for the current response if there is none yet.

        .. versionchanged:: 2.0
            SHA-1 is used to generate the value. MD5 may not be
            available in some environments.
        """
        if overwrite or "etag" not in self.headers:
            self.set_etag(generate_etag(self.get_data()), weak)
+
+
class ResponseStream:
    """A write-only, file-descriptor-like object used by
    :meth:`Response.stream` to represent the body of the stream. Each
    write pushes directly into the response iterable of the wrapped
    response object.
    """

    mode = "wb+"

    def __init__(self, response: Response):
        self.response = response
        self.closed = False

    def _check_open(self) -> None:
        # Mirror file-object behavior: operating on a closed stream
        # is an error.
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def write(self, value: bytes) -> int:
        self._check_open()
        self.response._ensure_sequence(mutable=True)
        self.response.response.append(value)  # type: ignore
        # The body grew, so any previously set length is now stale.
        self.response.headers.pop("Content-Length", None)
        return len(value)

    def writelines(self, seq: t.Iterable[bytes]) -> None:
        for chunk in seq:
            self.write(chunk)

    def close(self) -> None:
        self.closed = True

    def flush(self) -> None:
        self._check_open()

    def isatty(self) -> bool:
        self._check_open()
        return False

    def tell(self) -> int:
        self.response._ensure_sequence()
        return sum(map(len, self.response.response))

    @property
    def encoding(self) -> str:
        return "utf-8"
diff --git a/venv/lib/python3.8/site-packages/werkzeug/wsgi.py b/venv/lib/python3.8/site-packages/werkzeug/wsgi.py
new file mode 100644
index 0000000..01d40af
--- /dev/null
+++ b/venv/lib/python3.8/site-packages/werkzeug/wsgi.py
@@ -0,0 +1,595 @@
+from __future__ import annotations
+
+import io
+import typing as t
+from functools import partial
+from functools import update_wrapper
+
+from .exceptions import ClientDisconnected
+from .exceptions import RequestEntityTooLarge
+from .sansio import utils as _sansio_utils
+from .sansio.utils import host_is_trusted # noqa: F401 # Imported as part of API
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+
def responder(f: t.Callable[..., WSGIApplication]) -> WSGIApplication:
    """Marks a function as responder. Decorate a function with it and it
    will automatically call the return value as WSGI application.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """

    def application(*args: t.Any) -> t.Any:
        # Call the decorated function, then invoke whatever it returned as
        # a WSGI app with the trailing (environ, start_response) pair.
        return f(*args)(*args[-2:])

    return update_wrapper(application, f)
+
+
def get_current_url(
    environ: WSGIEnvironment,
    root_only: bool = False,
    strip_querystring: bool = False,
    host_only: bool = False,
    trusted_hosts: t.Iterable[str] | None = None,
) -> str:
    """Recreate the URL for a request from the parts in a WSGI
    environment.

    The URL is an IRI, not a URI, so it may contain Unicode characters.
    Use :func:`~werkzeug.urls.iri_to_uri` to convert it to ASCII.

    :param environ: The WSGI environment to get the URL parts from.
    :param root_only: Only build the root path, don't include the
        remaining path or query string.
    :param strip_querystring: Don't include the query string.
    :param host_only: Only build the scheme and host.
    :param trusted_hosts: A list of trusted host names to validate the
        host against.
    """
    scheme = environ["wsgi.url_scheme"]
    host = get_host(environ, trusted_hosts)

    # Each flag stops URL construction progressively earlier:
    # host only -> + root path -> + path -> + query string.
    if host_only:
        return _sansio_utils.get_current_url(scheme=scheme, host=host)

    root_path = environ.get("SCRIPT_NAME", "")

    if root_only:
        return _sansio_utils.get_current_url(
            scheme=scheme, host=host, root_path=root_path
        )

    path = environ.get("PATH_INFO", "")

    if strip_querystring:
        return _sansio_utils.get_current_url(
            scheme=scheme, host=host, root_path=root_path, path=path
        )

    # WSGI strings are "bytes in a str"; hand the raw bytes over.
    query_string = environ.get("QUERY_STRING", "").encode("latin1")

    return _sansio_utils.get_current_url(
        scheme=scheme,
        host=host,
        root_path=root_path,
        path=path,
        query_string=query_string,
    )
+
+
+def _get_server(
+ environ: WSGIEnvironment,
+) -> tuple[str, int | None] | None:
+ name = environ.get("SERVER_NAME")
+
+ if name is None:
+ return None
+
+ try:
+ port: int | None = int(environ.get("SERVER_PORT", None))
+ except (TypeError, ValueError):
+ # unix socket
+ port = None
+
+ return name, port
+
+
def get_host(
    environ: WSGIEnvironment, trusted_hosts: t.Iterable[str] | None = None
) -> str:
    """Return the host for the given WSGI environment.

    The ``Host`` header is preferred, then ``SERVER_NAME`` if it's not
    set. The returned host will only contain the port if it is different
    than the standard port for the protocol.

    Optionally, verify that the host is trusted using
    :func:`host_is_trusted` and raise a
    :exc:`~werkzeug.exceptions.SecurityError` if it is not.

    :param environ: A WSGI environment dict.
    :param trusted_hosts: A list of trusted host names.

    :return: Host, with port if necessary.
    :raise ~werkzeug.exceptions.SecurityError: If the host is not
        trusted.
    """
    scheme = environ["wsgi.url_scheme"]
    host_header = environ.get("HTTP_HOST")
    server = _get_server(environ)
    return _sansio_utils.get_host(scheme, host_header, server, trusted_hosts)
+
+
def get_content_length(environ: WSGIEnvironment) -> int | None:
    """Return the ``Content-Length`` header value as an int. If the header is not given
    or the ``Transfer-Encoding`` header is ``chunked``, ``None`` is returned to indicate
    a streaming request. If the value is not an integer, or negative, 0 is returned.

    :param environ: The WSGI environ to get the content length from.

    .. versionadded:: 0.9
    """
    content_length = environ.get("CONTENT_LENGTH")
    transfer_encoding = environ.get("HTTP_TRANSFER_ENCODING")
    return _sansio_utils.get_content_length(
        http_content_length=content_length,
        http_transfer_encoding=transfer_encoding,
    )
+
+
def get_input_stream(
    environ: WSGIEnvironment,
    safe_fallback: bool = True,
    max_content_length: int | None = None,
) -> t.IO[bytes]:
    """Return the WSGI input stream, wrapped so that it may be read safely without going
    past the ``Content-Length`` header value or ``max_content_length``.

    If ``Content-Length`` exceeds ``max_content_length``, a
    :exc:`RequestEntityTooLarge`` ``413 Content Too Large`` error is raised.

    If the WSGI server sets ``environ["wsgi.input_terminated"]``, it indicates that the
    server handles terminating the stream, so it is safe to read directly. For example,
    a server that knows how to handle chunked requests safely would set this.

    If ``max_content_length`` is set, it can be enforced on streams if
    ``wsgi.input_terminated`` is set. Otherwise, an empty stream is returned unless the
    user explicitly disables this safe fallback.

    If the limit is reached before the underlying stream is exhausted (such as a file
    that is too large, or an infinite stream), the remaining contents of the stream
    cannot be read safely. Depending on how the server handles this, clients may show a
    "connection reset" failure instead of seeing the 413 response.

    :param environ: The WSGI environ containing the stream.
    :param safe_fallback: Return an empty stream when ``Content-Length`` is not set.
        Disabling this allows infinite streams, which can be a denial-of-service risk.
    :param max_content_length: The maximum length that content-length or streaming
        requests may not exceed.

    .. versionchanged:: 2.3.2
        ``max_content_length`` is only applied to streaming requests if the server sets
        ``wsgi.input_terminated``.

    .. versionchanged:: 2.3
        Check ``max_content_length`` and raise an error if it is exceeded.

    .. versionadded:: 0.9
    """
    stream = t.cast(t.IO[bytes], environ["wsgi.input"])
    content_length = get_content_length(environ)

    # A declared length past the limit can be rejected before reading
    # anything at all.
    if (
        content_length is not None
        and max_content_length is not None
        and content_length > max_content_length
    ):
        raise RequestEntityTooLarge()

    if "wsgi.input_terminated" in environ:
        # The server terminates the stream itself, so reading directly is
        # safe; only a maximum limit needs wrapping. This check must stay
        # after the terminated check: applying the max wrapper to an
        # unterminated stream can hang a read when the client sends no
        # data (the dev server only buffers chunked encoding, for example).
        if max_content_length is None:
            return stream

        return t.cast(
            t.IO[bytes], LimitedStream(stream, max_content_length, is_max=True)
        )

    if content_length is None:
        # Without a length the stream may be infinite, which could tie up
        # a worker indefinitely; fall back to an empty body unless the
        # caller explicitly opted out of that safety.
        return io.BytesIO() if safe_fallback else stream

    return t.cast(t.IO[bytes], LimitedStream(stream, content_length))
+
+
def get_path_info(environ: WSGIEnvironment) -> str:
    """Return ``PATH_INFO`` from the WSGI environment.

    WSGI values are "bytes in a str" (latin1-decoded); the value is
    round-tripped through latin1 and decoded as UTF-8, replacing invalid
    sequences.

    :param environ: WSGI environment to get the path from.

    .. versionchanged:: 3.0
        The ``charset`` and ``errors`` parameters were removed.

    .. versionadded:: 0.9
    """
    return environ.get("PATH_INFO", "").encode("latin1").decode(errors="replace")
+
+
class ClosingIterator:
    """Wrap a WSGI iterable so extra cleanup callbacks run on ``close``.

    The WSGI specification requires that all middlewares and gateways
    respect the `close` callback of the iterable returned by the
    application, which makes this class useful for attaching additional
    close actions to a returned iterable::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    If there is just one close function it can be passed instead of the list.

    A closing iterator is not needed if the application uses response objects
    and finishes the processing if the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(
        self,
        iterable: t.Iterable[bytes],
        callbacks: None
        | (t.Callable[[], None] | t.Iterable[t.Callable[[], None]]) = None,
    ) -> None:
        self._next = t.cast(t.Callable[[], bytes], partial(next, iter(iterable)))

        # Normalize the callbacks argument to a fresh list.
        if callbacks is None:
            cleanup: list[t.Callable[[], None]] = []
        elif callable(callbacks):
            cleanup = [callbacks]
        else:
            cleanup = list(callbacks)

        # The wrapped iterable's own close (if any) must run first.
        own_close = getattr(iterable, "close", None)
        if own_close:
            cleanup.insert(0, own_close)

        self._callbacks = cleanup

    def __iter__(self) -> ClosingIterator:
        return self

    def __next__(self) -> bytes:
        return self._next()

    def close(self) -> None:
        for callback in self._callbacks:
            callback()
+
+
+def wrap_file(
+ environ: WSGIEnvironment, file: t.IO[bytes], buffer_size: int = 8192
+) -> t.Iterable[bytes]:
+ """Wraps a file. This uses the WSGI server's file wrapper if available
+ or otherwise the generic :class:`FileWrapper`.
+
+ .. versionadded:: 0.5
+
+ If the file wrapper from the WSGI server is used it's important to not
+ iterate over it from inside the application but to pass it through
+ unchanged. If you want to pass out a file wrapper inside a response
+ object you have to set :attr:`Response.direct_passthrough` to `True`.
+
+ More information about file wrappers are available in :pep:`333`.
+
+ :param file: a :class:`file`-like object with a :meth:`~file.read` method.
+ :param buffer_size: number of bytes for one iteration.
+ """
+ return environ.get("wsgi.file_wrapper", FileWrapper)( # type: ignore
+ file, buffer_size
+ )
+
+
class FileWrapper:
    """Convert a :class:`file`-like object into an iterable that yields
    ``buffer_size`` blocks until the file is fully read.

    You should not use this class directly but rather use the
    :func:`wrap_file` function that uses the WSGI server's file wrapper
    support if it's available.

    .. versionadded:: 0.5

    If you're using this object together with a :class:`Response` you have
    to use the `direct_passthrough` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file: t.IO[bytes], buffer_size: int = 8192) -> None:
        self.file = file
        self.buffer_size = buffer_size

    def close(self) -> None:
        close = getattr(self.file, "close", None)
        if close is not None:
            close()

    def seekable(self) -> bool:
        # Trust an explicit seekable() when present; otherwise fall back
        # to probing for a seek method.
        if hasattr(self.file, "seekable"):
            return self.file.seekable()
        return hasattr(self.file, "seek")

    def seek(self, *args: t.Any) -> None:
        seek = getattr(self.file, "seek", None)
        if seek is not None:
            seek(*args)

    def tell(self) -> int | None:
        tell = getattr(self.file, "tell", None)
        return tell() if tell is not None else None

    def __iter__(self) -> FileWrapper:
        return self

    def __next__(self) -> bytes:
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
+
+
class _RangeWrapper:
    # private for now, but should we make it public in the future ?

    """This class can be used to convert an iterable object into
    an iterable that will only yield a piece of the underlying content.
    It yields blocks until the underlying stream range is fully read.
    The yielded blocks will have a size that can't exceed the original
    iterator defined block size, but that can be smaller.

    If you're using this object together with a :class:`Response` you have
    to use the `direct_passthrough` mode.

    :param iterable: an iterable object with a :meth:`__next__` method.
    :param start_byte: byte from which read will start.
    :param byte_range: how many bytes to read.
    """

    def __init__(
        self,
        iterable: t.Iterable[bytes] | t.IO[bytes],
        start_byte: int = 0,
        byte_range: int | None = None,
    ):
        self.iterable = iter(iterable)
        self.byte_range = byte_range
        self.start_byte = start_byte
        # Exclusive absolute end position; stays None for "until the end".
        self.end_byte: int | None = None

        if byte_range is not None:
            self.end_byte = start_byte + byte_range

        # Total bytes consumed from the underlying iterable so far.
        self.read_length = 0
        # Seekability is checked on the original object (not iter(...)),
        # so file-like sources can jump straight to start_byte.
        self.seekable = hasattr(iterable, "seekable") and iterable.seekable()
        self.end_reached = False

    def __iter__(self) -> _RangeWrapper:
        return self

    def _next_chunk(self) -> bytes:
        # Pull one block from the source, tracking how far we have read.
        try:
            chunk = next(self.iterable)
            self.read_length += len(chunk)
            return chunk
        except StopIteration:
            self.end_reached = True
            raise

    def _first_iteration(self) -> tuple[bytes | None, int]:
        # Position the stream at start_byte: seek when possible, otherwise
        # discard whole chunks until start_byte is passed and trim the
        # overshoot from the last one.
        chunk = None
        if self.seekable:
            self.iterable.seek(self.start_byte)  # type: ignore
            self.read_length = self.iterable.tell()  # type: ignore
            contextual_read_length = self.read_length
        else:
            while self.read_length <= self.start_byte:
                chunk = self._next_chunk()
            if chunk is not None:
                chunk = chunk[self.start_byte - self.read_length :]
            contextual_read_length = self.start_byte
        return chunk, contextual_read_length

    def _next(self) -> bytes:
        # Return the next chunk, trimmed so nothing past end_byte is
        # yielded. contextual_read_length is the absolute position at the
        # *start* of the returned chunk, used to compute the trim point.
        if self.end_reached:
            raise StopIteration()
        chunk = None
        contextual_read_length = self.read_length
        if self.read_length == 0:
            chunk, contextual_read_length = self._first_iteration()
        if chunk is None:
            chunk = self._next_chunk()
        if self.end_byte is not None and self.read_length >= self.end_byte:
            self.end_reached = True
            return chunk[: self.end_byte - contextual_read_length]
        return chunk

    def __next__(self) -> bytes:
        chunk = self._next()
        if chunk:
            return chunk
        # An empty trimmed chunk means the requested range is finished.
        self.end_reached = True
        raise StopIteration()

    def close(self) -> None:
        if hasattr(self.iterable, "close"):
            self.iterable.close()
+
+
class LimitedStream(io.RawIOBase):
    """Wrap a stream so that it doesn't read more than a given limit. This is used to
    limit ``wsgi.input`` to the ``Content-Length`` header value or
    :attr:`.Request.max_content_length`.

    When attempting to read after the limit has been reached, :meth:`on_exhausted` is
    called. When the limit is a maximum, this raises :exc:`.RequestEntityTooLarge`.

    If reading from the stream returns zero bytes or raises an error,
    :meth:`on_disconnect` is called, which raises :exc:`.ClientDisconnected`. When the
    limit is a maximum and zero bytes were read, no error is raised, since it may be the
    end of the stream.

    If the limit is reached before the underlying stream is exhausted (such as a file
    that is too large, or an infinite stream), the remaining contents of the stream
    cannot be read safely. Depending on how the server handles this, clients may show a
    "connection reset" failure instead of seeing the 413 response.

    :param stream: The stream to read from. Must be a readable binary IO object.
    :param limit: The limit in bytes to not read past. Should be either the
        ``Content-Length`` header value or ``request.max_content_length``.
    :param is_max: Whether the given ``limit`` is ``request.max_content_length`` instead
        of the ``Content-Length`` header value. This changes how exhausted and
        disconnect events are handled.

    .. versionchanged:: 2.3
        Handle ``max_content_length`` differently than ``Content-Length``.

    .. versionchanged:: 2.3
        Implements ``io.RawIOBase`` rather than ``io.IOBase``.
    """

    def __init__(self, stream: t.IO[bytes], limit: int, is_max: bool = False) -> None:
        self._stream = stream
        self._pos = 0
        self.limit = limit
        self._limit_is_max = is_max

    @property
    def is_exhausted(self) -> bool:
        """Whether the current stream position has reached the limit."""
        return self._pos >= self.limit

    def on_exhausted(self) -> None:
        """Called when attempting to read after the limit has been reached.

        The default behavior is to do nothing, unless the limit is a maximum, in which
        case it raises :exc:`.RequestEntityTooLarge`.

        .. versionchanged:: 2.3
            Raises ``RequestEntityTooLarge`` if the limit is a maximum.

        .. versionchanged:: 2.3
            Any return value is ignored.
        """
        if self._limit_is_max:
            raise RequestEntityTooLarge()

    def on_disconnect(self, error: Exception | None = None) -> None:
        """Called when an attempted read receives zero bytes before the limit was
        reached. This indicates that the client disconnected before sending the full
        request body.

        The default behavior is to raise :exc:`.ClientDisconnected`, unless the limit is
        a maximum and no error was raised.

        .. versionchanged:: 2.3
            Added the ``error`` parameter. Do nothing if the limit is a maximum and no
            error was raised.

        .. versionchanged:: 2.3
            Any return value is ignored.
        """
        if not self._limit_is_max or error is not None:
            raise ClientDisconnected()

        # If the limit is a maximum, then we may have read zero bytes because the
        # streaming body is complete. There's no way to distinguish that from the
        # client disconnecting early.

    def exhaust(self) -> bytes:
        """Exhaust the stream by reading until the limit is reached or the client
        disconnects, returning the remaining data.

        .. versionchanged:: 2.3
            Return the remaining data.

        .. versionchanged:: 2.2.3
            Handle case where wrapped stream returns fewer bytes than requested.
        """
        if not self.is_exhausted:
            return self.readall()

        return b""

    def readinto(self, b: bytearray) -> int | None:  # type: ignore[override]
        size = len(b)
        remaining = self.limit - self._pos

        if remaining <= 0:
            self.on_exhausted()
            return 0

        if hasattr(self._stream, "readinto"):
            # Use stream.readinto if it's available.
            if size <= remaining:
                # The size fits in the remaining limit, use the buffer directly.
                try:
                    out_size: int | None = self._stream.readinto(b)
                except (OSError, ValueError) as e:
                    self.on_disconnect(error=e)
                    return 0
            else:
                # Use a temp buffer with the remaining limit as the size.
                temp_b = bytearray(remaining)

                try:
                    out_size = self._stream.readinto(temp_b)
                except (OSError, ValueError) as e:
                    self.on_disconnect(error=e)
                    return 0

                if out_size:
                    # Copy only the bytes actually read. Assigning the whole
                    # temp buffer (``b[:out_size] = temp_b``) on a short read
                    # would resize ``b`` past its original length, or raise
                    # for non-resizable buffers such as memoryviews.
                    b[:out_size] = temp_b[:out_size]
        else:
            # WSGI requires that stream.read is available.
            try:
                data = self._stream.read(min(size, remaining))
            except (OSError, ValueError) as e:
                self.on_disconnect(error=e)
                return 0

            out_size = len(data)
            b[:out_size] = data

        if not out_size:
            # Read zero bytes from the stream.
            self.on_disconnect()
            return 0

        self._pos += out_size
        return out_size

    def readall(self) -> bytes:
        if self.is_exhausted:
            self.on_exhausted()
            return b""

        out = bytearray()

        # The parent implementation uses "while True", which results in an extra read.
        while not self.is_exhausted:
            data = self.read(1024 * 64)

            # Stream may return empty before a max limit is reached.
            if not data:
                break

            out.extend(data)

        return bytes(out)

    def tell(self) -> int:
        """Return the current stream position.

        .. versionadded:: 0.9
        """
        return self._pos

    def readable(self) -> bool:
        return True