diff --git a/.ci/scripts/auditwheel_wrapper.py b/.ci/scripts/auditwheel_wrapper.py
new file mode 100755
index 000000000000..a33b39314fb8
--- /dev/null
+++ b/.ci/scripts/auditwheel_wrapper.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Wraps `auditwheel repair` to first check whether we're repairing a potentially
+# abi3-compatible wheel; if so, rename the wheel before repairing it.
+
+import argparse
+import os
+import subprocess
+from typing import Optional
+from zipfile import ZipFile
+
+from packaging.tags import Tag
+from packaging.utils import parse_wheel_filename
+from packaging.version import Version
+
+
+def check_is_abi3_compatible(wheel_file: str) -> None:
+ """Check the contents of the built wheel for any `.so` files that are *not*
+ abi3 compatible.
+ """
+
+ with ZipFile(wheel_file, "r") as wheel:
+ for file in wheel.namelist():
+ if not file.endswith(".so"):
+ continue
+
+ if not file.endswith(".abi3.so"):
+ raise Exception(f"Found non-abi3 lib: {file}")
+
+
+def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
+ """Replaces the cpython wheel file with a ABI3 compatible wheel"""
+
+ if tag.abi == "abi3":
+ # Nothing to do.
+ return wheel_file
+
+ check_is_abi3_compatible(wheel_file)
+
+ abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
+
+ dirname = os.path.dirname(wheel_file)
+ new_wheel_file = os.path.join(
+ dirname,
+ f"{name}-{version}-{abi3_tag}.whl",
+ )
+
+ os.rename(wheel_file, new_wheel_file)
+
+ print("Renamed wheel to", new_wheel_file)
+
+ return new_wheel_file
+
+
+def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
+ """Entry point"""
+
+ # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
+ # normalizes the package name (i.e. it converts matrix_synapse ->
+ # matrix-synapse), which is not what we want.
+ _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
+ name = os.path.basename(wheel_file).split("-")[0]
+
+ if len(tags) != 1:
+ # We expect the wheel file to have only a single tag
+ raise Exception(f"Unexpectedly found multiple tags: {tags}")
+
+ tag = next(iter(tags))
+
+ if build:
+ # We don't use build tags in Synapse
+ raise Exception(f"Unexpected build tag: {build}")
+
+ # If the wheel is for cpython then convert it into an abi3 wheel.
+ if tag.interpreter.startswith("cp"):
+ wheel_file = cpython(wheel_file, name, version, tag)
+
+ # Finally, repair the wheel.
+ if archs is not None:
+ # If we are given archs then we are on macOS: list the wheel's dependencies
+ # with `delocate-listdeps`, then repair it with `delocate-wheel`.
+ subprocess.run(["delocate-listdeps", wheel_file], check=True)
+ subprocess.run(
+ ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
+ check=True,
+ )
+ else:
+ subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")
+
+ parser.add_argument(
+ "--wheel-dir",
+ "-w",
+ metavar="WHEEL_DIR",
+ help="Directory to store delocated wheels",
+ required=True,
+ )
+
+ parser.add_argument(
+ "--require-archs",
+ metavar="archs",
+ default=None,
+ )
+
+ parser.add_argument(
+ "wheel_file",
+ metavar="WHEEL_FILE",
+ )
+
+ args = parser.parse_args()
+
+ wheel_file = args.wheel_file
+ wheel_dir = args.wheel_dir
+ archs = args.require_archs
+
+ main(wheel_file, wheel_dir, archs)
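For reference, here is a minimal sketch (using a hypothetical wheel filename) of what the rename in `cpython()` amounts to: `packaging`'s `Tag` stringifies as `interpreter-abi-platform`, so swapping the ABI component and rebuilding the filename is all that is needed.

```python
# Sketch of the abi3 rename, on a hypothetical (not real) wheel filename.
from packaging.tags import Tag
from packaging.utils import parse_wheel_filename

filename = "matrix_synapse-1.74.0-cp37-cp37m-manylinux_2_17_x86_64.whl"

# parse_wheel_filename normalizes the distribution name
# (matrix_synapse -> matrix-synapse), which is why the script re-reads the
# raw name from the filename itself.
_, version, _, tags = parse_wheel_filename(filename)
name = filename.split("-")[0]

tag = next(iter(tags))
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)

# A Tag stringifies as "<interpreter>-<abi>-<platform>".
print(f"{name}-{version}-{abi3_tag}.whl")
# -> matrix_synapse-1.74.0-cp37-abi3-manylinux_2_17_x86_64.whl
```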
diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py
index b1f604eeb0d0..0cdc20e19c31 100755
--- a/.ci/scripts/calculate_jobs.py
+++ b/.ci/scripts/calculate_jobs.py
@@ -18,6 +18,13 @@
import json
import os
+
+def set_output(key: str, value: str):
+ # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
+ with open(os.environ["GITHUB_OUTPUT"], "at") as f:
+ print(f"{key}={value}", file=f)
+
+
IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
@@ -39,7 +46,7 @@
"database": "sqlite",
"extras": "all",
}
- for version in ("3.8", "3.9", "3.10")
+ for version in ("3.8", "3.9", "3.10", "3.11")
)
@@ -47,7 +54,7 @@
{
"python-version": "3.7",
"database": "postgres",
- "postgres-version": "10",
+ "postgres-version": "11",
"extras": "all",
}
]
@@ -55,9 +62,9 @@
if not IS_PR:
trial_postgres_tests.append(
{
- "python-version": "3.10",
+ "python-version": "3.11",
"database": "postgres",
- "postgres-version": "14",
+ "postgres-version": "15",
"extras": "all",
}
)
@@ -81,7 +88,7 @@
test_matrix = json.dumps(
trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
-print(f"::set-output name=trial_test_matrix::{test_matrix}")
+set_output("trial_test_matrix", test_matrix)
# First calculate the various sytest jobs.
@@ -125,4 +132,4 @@
print("::endgroup::")
test_matrix = json.dumps(sytest_tests)
-print(f"::set-output name=sytest_test_matrix::{test_matrix}")
+set_output("sytest_test_matrix", test_matrix)
diff --git a/.ci/scripts/setup_complement_prerequisites.sh b/.ci/scripts/setup_complement_prerequisites.sh
index 4848901cbfd6..3778478da644 100755
--- a/.ci/scripts/setup_complement_prerequisites.sh
+++ b/.ci/scripts/setup_complement_prerequisites.sh
@@ -21,7 +21,7 @@ endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
- go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
+ go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
endblock
block Install custom gotestfmt template
diff --git a/.dockerignore b/.dockerignore
index 1c6905b1bbb3..0b51345cbdb0 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -9,6 +9,7 @@
!pyproject.toml
!poetry.lock
!Cargo.lock
+!Cargo.toml
!build_rust.py
rust/target
diff --git a/.editorconfig b/.editorconfig
index d629bede5ec5..bf9021ff821d 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -4,7 +4,7 @@
root = true
# 4 space indentation
-[*.py]
+[*.{py,pyi}]
indent_style = space
indent_size = 4
max_line_length = 88
diff --git a/.flake8 b/.flake8
index acb118c86e84..4c6a4d5843e2 100644
--- a/.flake8
+++ b/.flake8
@@ -8,4 +8,11 @@
# E203: whitespace before ':' (which is contrary to pep8?)
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
-ignore=W503,W504,E203,E731,E501
+#
+# flake8-bugbear runs extra checks. Its error codes are described at
+# https://github.com/PyCQA/flake8-bugbear#list-of-warnings
+# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
+# B023: Functions defined inside a loop must not use variables redefined in the loop
+# B024: Abstract base class with no abstract method.
+
+ignore=W503,W504,E203,E731,E501,B019,B023,B024
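Of the newly ignored flake8-bugbear checks, B019 is the least self-explanatory: `functools.lru_cache` on a method keeps `self` alive in the cache. A minimal illustration (not Synapse code):

```python
# Why B019 warns: the lru_cache is shared at class level and every cache
# entry holds a strong reference to `self`, so cached instances are never
# garbage-collected while their entries remain.
import functools

class Lookup:
    @functools.lru_cache(maxsize=128)  # B019 would flag this decorator
    def resolve(self, key: str) -> str:
        return key.upper()

lookup = Lookup()
lookup.resolve("abc")
# The cache entry keyed on (lookup, "abc") now pins `lookup` in memory.
```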
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
index 1b304198bc8f..abe0f656a28b 100644
--- a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
@@ -74,6 +74,36 @@ body:
- Debian packages from packages.matrix.org
- pip (from PyPI)
- Other (please mention below)
+ - I don't know
+ validations:
+ required: true
+ - type: input
+ id: database
+ attributes:
+ label: Database
+ description: |
+ Are you using SQLite or PostgreSQL? What's the version of your database?
+
+ If PostgreSQL, please also answer the following:
+ - are you using a single PostgreSQL server
+ or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)?
+ - have you previously ported from SQLite using the Synapse "portdb" script?
+ - have you previously restored from a backup?
+ validations:
+ required: true
+ - type: dropdown
+ id: workers
+ attributes:
+ label: Workers
+ description: |
+ Are you running a single Synapse process, or are you running
+ [2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)?
+ options:
+ - Single process
+ - Multiple workers
+ - I don't know
+ validations:
+ required: true
- type: textarea
id: platform
attributes:
@@ -83,17 +113,28 @@ body:
e.g. distro, hardware, if it's running in a vm/container, etc.
validations:
required: true
+ - type: textarea
+ id: config
+ attributes:
+ label: Configuration
+ description: |
+ Do you have any unusual config options turned on? If so, please provide details.
+
+ - Experimental or undocumented features
+ - [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence)
+ - [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html)
+ - [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html)
- type: textarea
id: logs
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
- This will be automatically formatted into code, so there is no need for backticks.
+ This will be automatically formatted into code, so there is no need for backticks (`\``).
Please be careful to remove any personal or private data.
- **Bug reports are usually very difficult to diagnose without logging.**
+ **Bug reports are usually impossible to diagnose without logging.**
render: shell
validations:
required: true
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 9c7db1fc866f..7ce353ed6409 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -15,3 +15,9 @@ updates:
directory: "/"
schedule:
interval: "weekly"
+
+ - package-ecosystem: "cargo"
+ directory: "/"
+ versioning-strategy: "lockfile-only"
+ schedule:
+ interval: "weekly"
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index b3793e5c1f28..49427ab50d0f 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -27,7 +27,7 @@ jobs:
- name: Inspect builder
run: docker buildx inspect
-
+
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
@@ -55,3 +55,8 @@ jobs:
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
+
+ # arm64 builds OOM without the git fetch setting. c.f.
+ # https://github.com/rust-lang/cargo/issues/10583
+ build-args: |
+ CARGO_NET_GIT_FETCH_WITH_CLI=true
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml
new file mode 100644
index 000000000000..231982f6816c
--- /dev/null
+++ b/.github/workflows/docs-pr-netlify.yaml
@@ -0,0 +1,34 @@
+name: Deploy documentation PR preview
+
+on:
+ workflow_run:
+ workflows: [ "Prepare documentation PR preview" ]
+ types:
+ - completed
+
+jobs:
+ netlify:
+ if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request'
+ runs-on: ubuntu-latest
+ steps:
+ # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
+ # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
+ - name: 📥 Download artifact
+ uses: dawidd6/action-download-artifact@e6e25ac3a2b93187502a8be1ef9e9603afc34925 # v2.24.2
+ with:
+ workflow: docs-pr.yaml
+ run_id: ${{ github.event.workflow_run.id }}
+ name: book
+ path: book
+
+ - name: 📤 Deploy to Netlify
+ uses: matrix-org/netlify-pr-preview@v1
+ with:
+ path: book
+ owner: ${{ github.event.workflow_run.head_repository.owner.login }}
+ branch: ${{ github.event.workflow_run.head_branch }}
+ revision: ${{ github.event.workflow_run.head_sha }}
+ token: ${{ secrets.NETLIFY_AUTH_TOKEN }}
+ site_id: ${{ secrets.NETLIFY_SITE_ID }}
+ desc: Documentation preview
+ deployment_env: PR Documentation Preview
diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
new file mode 100644
index 000000000000..cde6cf511e75
--- /dev/null
+++ b/.github/workflows/docs-pr.yaml
@@ -0,0 +1,34 @@
+name: Prepare documentation PR preview
+
+on:
+ pull_request:
+ paths:
+ - docs/**
+
+jobs:
+ pages:
+ name: GitHub Pages
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup mdbook
+ uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+ with:
+ mdbook-version: '0.4.17'
+
+ - name: Build the documentation
+ # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
+ # However, we're using docs/README.md for other purposes and need to pick a new page
+ # as the default. Let's opt for the welcome page instead.
+ run: |
+ mdbook build
+ cp book/welcome_and_overview.html book/index.html
+
+ - name: Upload Artifact
+ uses: actions/upload-artifact@v3
+ with:
+ name: book
+ path: book
+ # We'll only use this in a workflow_run, then we're done with it
+ retention-days: 1
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 17b9df601c59..575412d96543 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -20,7 +20,7 @@ jobs:
- uses: actions/checkout@v3
- name: Setup mdbook
- uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
+ uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
with:
mdbook-version: '0.4.17'
@@ -54,11 +54,11 @@ jobs:
esac
# finally, set the 'branch-version' var.
- echo "::set-output name=branch-version::$branch"
+ echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
# Deploy to the target directory.
- name: Deploy to gh pages
- uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
+ uses: peaceiris/actions-gh-pages@de7ea6f8efb354206b205ef54722213d99067935 # v3.9.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index b1e45ee64860..4bc4266c4d2a 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -27,10 +27,9 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
@@ -62,10 +61,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- run: sudo apt-get -qq install xmlsec1
@@ -136,10 +134,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Ensure sytest runs `pip install`
@@ -211,7 +208,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+ - uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml
new file mode 100644
index 000000000000..f26143de6bbf
--- /dev/null
+++ b/.github/workflows/push_complement_image.yml
@@ -0,0 +1,74 @@
+# This task does not run complement tests, see tests.yaml instead.
+# This task does not build docker images for synapse for use on docker hub, see docker.yaml instead
+
+name: Store complement-synapse image in ghcr.io
+on:
+ push:
+ branches: [ "master" ]
+ schedule:
+ - cron: '0 5 * * *'
+ workflow_dispatch:
+ inputs:
+ branch:
+ required: true
+ default: 'develop'
+ type: choice
+ options:
+ - develop
+ - master
+
+# Only run this action once per pull request/branch; restart if a new commit arrives.
+# C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency
+# and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build:
+ name: Build and push complement image
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ packages: write
+ steps:
+ - name: Checkout specific branch (debug build)
+ uses: actions/checkout@v3
+ if: github.event_name == 'workflow_dispatch'
+ with:
+ ref: ${{ inputs.branch }}
+ - name: Checkout clean copy of develop (scheduled build)
+ uses: actions/checkout@v3
+ if: github.event_name == 'schedule'
+ with:
+ ref: develop
+ - name: Checkout clean copy of master (on-push)
+ uses: actions/checkout@v3
+ if: github.event_name == 'push'
+ with:
+ ref: master
+ - name: Login to registry
+ uses: docker/login-action@v1
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Work out labels for complement image
+ id: meta
+ uses: docker/metadata-action@v4
+ with:
+ images: ghcr.io/${{ github.repository }}/complement-synapse
+ tags: |
+ type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}}
+ type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }}
+ type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }}
+ type=sha,format=long
+ - name: Run scripts-dev/complement.sh to generate complement-synapse:latest image.
+ run: scripts-dev/complement.sh --build-only
+ - name: Tag and push generated image
+ run: |
+ for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
+ echo "tag and push $TAG"
+ docker tag complement-synapse $TAG
+ docker push $TAG
+ done
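The `join(fromJson(...).tags, ' ')` expression simply flattens the JSON tag list produced by `docker/metadata-action` into the shell loop. A rough Python equivalent, with an illustrative tag list standing in for `steps.meta.outputs.json`:

```python
# Dry-run equivalent of the tag-and-push step; the tags below are examples,
# not the action's actual output.
import json

meta_output = json.dumps({
    "tags": [
        "ghcr.io/matrix-org/synapse/complement-synapse:develop",
        "ghcr.io/matrix-org/synapse/complement-synapse:sha-0123abc",
    ]
})

for tag in json.loads(meta_output)["tags"]:
    print("tag and push", tag)
    # With a docker daemon and the freshly built image available:
    # subprocess.run(["docker", "tag", "complement-synapse", tag], check=True)
    # subprocess.run(["docker", "push", tag], check=True)
```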
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 1c004fbc1170..0601a7dbaf15 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -27,6 +27,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
@@ -34,7 +36,7 @@ jobs:
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
fi
- echo "::set-output name=distros::$dists"
+ echo "distros=$dists" >> "$GITHUB_OUTPUT"
# map the step outputs to job outputs
outputs:
distros: ${{ steps.set-distros.outputs.distros }}
@@ -70,6 +72,8 @@ jobs:
- name: Set up python
uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
- name: Build the packages
# see https://github.com/docker/build-push-action/issues/252
@@ -91,18 +95,27 @@ jobs:
path: debs/*
build-wheels:
- name: Build wheels on ${{ matrix.os }}
+ name: Build wheels on ${{ matrix.os }} for ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-20.04, macos-10.15]
+ os: [ubuntu-20.04, macos-11]
+ arch: [x86_64, aarch64]
+ # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
+ # It is not read by the rest of the workflow.
is_pr:
- ${{ startsWith(github.ref, 'refs/pull/') }}
exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
- os: "macos-10.15"
+ os: "macos-11"
+ # Don't build aarch64 wheels on mac.
+ - os: "macos-11"
+ arch: aarch64
+ # Don't build aarch64 wheels on PR CI.
+ - is_pr: true
+ arch: aarch64
steps:
- uses: actions/checkout@v3
@@ -116,11 +129,19 @@ jobs:
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
- # Only build a single wheel in CI.
- - name: Set env vars.
- run: |
- echo "CIBW_BUILD="cp37-manylinux_x86_64"" >> $GITHUB_ENV
+ - name: Set up QEMU to emulate aarch64
+ if: matrix.arch == 'aarch64'
+ uses: docker/setup-qemu-action@v2
+ with:
+ platforms: arm64
+
+ - name: Build aarch64 wheels
+ if: matrix.arch == 'aarch64'
+ run: echo 'CIBW_ARCHS_LINUX=aarch64' >> $GITHUB_ENV
+
+ - name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
+ run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
@@ -128,6 +149,9 @@ jobs:
# Skip testing for platforms which various libraries don't have wheels
# for, and so need extra build deps.
CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
+ # Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
+ CARGO_NET_GIT_FETCH_WITH_CLI: true
+ CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@v3
with:
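The interplay of the `is_pr` flag with the excludes may be easier to see procedurally. A sketch of GitHub's matrix semantics under the values above (an entry is dropped if it matches every key of any exclude rule):

```python
# Sketch of the wheel-build matrix after the excludes are applied.
from itertools import product

is_pr = True  # startsWith(github.ref, 'refs/pull/')

full_matrix = [
    {"os": os, "arch": arch, "is_pr": is_pr}
    for os, arch in product(["ubuntu-20.04", "macos-11"], ["x86_64", "aarch64"])
]
excludes = [
    {"is_pr": True, "os": "macos-11"},      # no macOS wheels on PR CI
    {"os": "macos-11", "arch": "aarch64"},  # no aarch64 wheels on mac
    {"is_pr": True, "arch": "aarch64"},     # no aarch64 wheels on PR CI
]

jobs = [
    job for job in full_matrix
    if not any(all(job.get(k) == v for k, v in rule.items()) for rule in excludes)
]
print(jobs)  # on a PR, only ubuntu-20.04 / x86_64 survives
```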
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 1115dcc5f395..f07655d982fb 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -27,12 +27,15 @@ jobs:
rust:
- 'rust/**'
- 'Cargo.toml'
+ - 'Cargo.lock'
check-sampleconfig:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
@@ -44,6 +47,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
@@ -68,6 +73,8 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
env:
@@ -93,14 +100,38 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- override: true
components: clippy
- uses: Swatinem/rust-cache@v2
- - run: cargo clippy
+ - run: cargo clippy -- -D warnings
+
+ # We also lint against a nightly rustc so that we can lint the benchmark
+ # suite, which requires a nightly compiler.
+ lint-clippy-nightly:
+ runs-on: ubuntu-latest
+ needs: changes
+ if: ${{ needs.changes.outputs.rust == 'true' }}
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: nightly-2022-12-01
+ components: clippy
+ - uses: Swatinem/rust-cache@v2
+
+ - run: cargo clippy --all-features -- -D warnings
lint-rustfmt:
runs-on: ubuntu-latest
@@ -111,11 +142,13 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
- toolchain: 1.58.1
- override: true
- components: rustfmt
+ toolchain: 1.58.1
+ components: rustfmt
- uses: Swatinem/rust-cache@v2
- run: cargo fmt --check
@@ -143,6 +176,8 @@ jobs:
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
- id: get-matrix
run: .ci/scripts/calculate_jobs.py
outputs:
@@ -162,11 +197,25 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
+ # 1. Mount postgres data files onto a tmpfs in-memory filesystem to reduce overhead of docker's overlayfs layer.
+ # 2. Expose the unix socket for postgres. This removes latency of using docker-proxy for connections.
run: |
docker run -d -p 5432:5432 \
+ --tmpfs /var/lib/postgres:rw,size=6144m \
+ --mount 'type=bind,src=/var/run/postgresql,dst=/var/run/postgresql' \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.job.postgres-version }}
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: 1.58.1
+ - uses: Swatinem/rust-cache@v2
+
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.job.python-version }}
@@ -175,10 +224,10 @@ jobs:
if: ${{ matrix.job.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- - run: poetry run trial --jobs=2 tests
+ - run: poetry run trial --jobs=6 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
- SYNAPSE_POSTGRES_HOST: localhost
+ SYNAPSE_POSTGRES_HOST: /var/run/postgresql
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
@@ -203,10 +252,12 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
@@ -245,7 +296,7 @@ jobs:
python-version: '3.7'
extras: "all test"
- - run: poetry run trial -j2 tests
+ - run: poetry run trial -j6 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -319,10 +370,12 @@ jobs:
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
@@ -383,10 +436,10 @@ jobs:
matrix:
include:
- python-version: "3.7"
- postgres-version: "10"
+ postgres-version: "11"
- - python-version: "3.10"
- postgres-version: "14"
+ - python-version: "3.11"
+ postgres-version: "15"
services:
postgres:
@@ -404,6 +457,15 @@ jobs:
steps:
- uses: actions/checkout@v3
+ - name: Add PostgreSQL apt repository
+ # We need a version of pg_dump that can handle the version of
+ # PostgreSQL being tested against. The Ubuntu package repository lags
+ # behind new releases, so we have to use the PostgreSQL apt repository.
+ # Steps taken from https://www.postgresql.org/download/linux/ubuntu/
+ run: |
+ sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
+ wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
+ sudo apt-get update
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
@@ -451,10 +513,12 @@ jobs:
path: synapse
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
- name: Prepare Complement's Prerequisites
@@ -477,10 +541,12 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
toolchain: 1.58.1
- override: true
- uses: Swatinem/rust-cache@v2
- run: cargo test
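Setting `SYNAPSE_POSTGRES_HOST` to `/var/run/postgresql` works because libpq treats a host beginning with `/` as a unix-socket directory. A hedged sketch of what the test connection amounts to, assuming psycopg2 (the driver Synapse uses) and the bind mount from the workflow:

```python
# A host value starting with "/" makes libpq (and therefore psycopg2)
# connect over the unix socket in that directory, bypassing docker-proxy TCP.
import psycopg2

conn = psycopg2.connect(
    host="/var/run/postgresql",  # bind-mounted from the postgres container
    user="postgres",
    password="postgres",
    dbname="postgres",
)
print(conn.server_version)
conn.close()
```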
diff --git a/.github/workflows/triage-incoming.yml b/.github/workflows/triage-incoming.yml
index f926bcb75959..0f0397cf5bc6 100644
--- a/.github/workflows/triage-incoming.yml
+++ b/.github/workflows/triage-incoming.yml
@@ -5,24 +5,11 @@ on:
types: [ opened ]
jobs:
- add_new_issues:
- name: Add new issues to the triage board
- runs-on: ubuntu-latest
- steps:
- - uses: octokit/graphql-action@v2.x
- id: add_to_project
- with:
- headers: '{"GraphQL-Features": "projects_next_graphql"}'
- query: |
- mutation add_to_project($projectid:ID!,$contentid:ID!) {
- addProjectV2ItemById(input: {projectId: $projectid contentId: $contentid}) {
- item {
- id
- }
- }
- }
- projectid: ${{ env.PROJECT_ID }}
- contentid: ${{ github.event.issue.node_id }}
- env:
- PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
- GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
+ triage:
+ uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1
+ with:
+ project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
+ content_id: ${{ github.event.issue.node_id }}
+ secrets:
+ github_access_token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+
diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml
index fbd55de17fde..d1ac4357b153 100644
--- a/.github/workflows/triage_labelled.yml
+++ b/.github/workflows/triage_labelled.yml
@@ -11,34 +11,34 @@ jobs:
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- - uses: octokit/graphql-action@v2.x
- id: add_to_project
+ - uses: actions/add-to-project@main
+ id: add_project
with:
- headers: '{"GraphQL-Features": "projects_next_graphql"}'
- query: |
- mutation {
- updateProjectV2ItemFieldValue(
- input: {
- projectId: $projectid
- itemId: $contentid
- fieldId: $fieldid
- value: {
- singleSelectOptionId: "Todo"
+ project-url: "https://github.com/orgs/matrix-org/projects/67"
+ github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+ - name: Set status
+ env:
+ GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
+ run: |
+ gh api graphql -f query='
+ mutation(
+ $project: ID!
+ $item: ID!
+ $fieldid: ID!
+ $columnid: String!
+ ) {
+ updateProjectV2ItemFieldValue(
+ input: {
+ projectId: $project
+ itemId: $item
+ fieldId: $fieldid
+ value: {
+ singleSelectOptionId: $columnid
}
- }
- ) {
- projectV2Item {
- id
- }
+ }
+ ) {
+ projectV2Item {
+ id
}
}
-
- projectid: ${{ env.PROJECT_ID }}
- contentid: ${{ github.event.issue.node_id }}
- fieldid: ${{ env.FIELD_ID }}
- optionid: ${{ env.OPTION_ID }}
- env:
- PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
- GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
- FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
- OPTION_ID: "ba22e43c"
+ }' -f project="PVT_kwDOAIB0Bs4AFDdZ" -f item=${{ steps.add_project.outputs.itemId }} -f fieldid="PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4" -f columnid=ba22e43c --silent
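The replacement step performs a single `updateProjectV2ItemFieldValue` mutation through `gh api graphql`. For reference, an equivalent sketch over plain HTTP; the token and item ID are placeholders, while the project, field, and option IDs are those from the workflow:

```python
# Plain-HTTP equivalent of the `gh api graphql` step above.
import requests

MUTATION = """
mutation($project: ID!, $item: ID!, $fieldid: ID!, $columnid: String!) {
  updateProjectV2ItemFieldValue(
    input: {
      projectId: $project, itemId: $item, fieldId: $fieldid,
      value: {singleSelectOptionId: $columnid}
    }
  ) { projectV2Item { id } }
}
"""

resp = requests.post(
    "https://api.github.com/graphql",
    json={
        "query": MUTATION,
        "variables": {
            "project": "PVT_kwDOAIB0Bs4AFDdZ",
            "item": "PVTI_placeholder",  # from the add-to-project step's output
            "fieldid": "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4",
            "columnid": "ba22e43c",
        },
    },
    headers={"Authorization": "Bearer ghp_placeholder"},
)
resp.raise_for_status()
```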
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index b4e26118c075..262b17a20db5 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -18,10 +18,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -44,10 +43,9 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -84,10 +82,9 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: actions-rs/toolchain@v1
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
with:
- toolchain: stable
- override: true
+ toolchain: stable
- uses: Swatinem/rust-cache@v2
- name: Patch dependencies
@@ -151,12 +148,11 @@ jobs:
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
- pipx install poetry==1.1.14
+ pipx install poetry==1.2.0
poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
- # NOT IN 1.1.14 poetry lock --check
working-directory: synapse
- run: |
@@ -178,7 +174,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+ - uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
diff --git a/CHANGES.md b/CHANGES.md
index 862524f2084e..b286ebdaf468 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,11 +1,535 @@
-Synapse 1.69.0rc2 (2022-10-06)
+Synapse 1.74.0rc1 (2022-12-13)
+==============================
+
+Features
+--------
+
+- Improve user search for international display names. ([\#14464](https://github.com/matrix-org/synapse/issues/14464))
+- Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`. ([\#14490](https://github.com/matrix-org/synapse/issues/14490), [\#14525](https://github.com/matrix-org/synapse/issues/14525))
+- Add new `push.enabled` config option to allow opting out of push notification calculation. ([\#14551](https://github.com/matrix-org/synapse/issues/14551), [\#14619](https://github.com/matrix-org/synapse/issues/14619))
+- Advertise support for Matrix 1.5 on `/_matrix/client/versions`. ([\#14576](https://github.com/matrix-org/synapse/issues/14576))
+- Improve opentracing and logging for to-device message handling. ([\#14598](https://github.com/matrix-org/synapse/issues/14598))
+- Allow selecting "prejoin" events by state keys in addition to event types. ([\#14642](https://github.com/matrix-org/synapse/issues/14642))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where a device list update might not be sent to clients in certain circumstances. ([\#14435](https://github.com/matrix-org/synapse/issues/14435), [\#14592](https://github.com/matrix-org/synapse/issues/14592), [\#14604](https://github.com/matrix-org/synapse/issues/14604))
+- Suppress a spurious warning when `POST /rooms/<room_id>/<membership_action>`, `POST /join/<room_id_or_alias>` receive an empty HTTP request body. ([\#14600](https://github.com/matrix-org/synapse/issues/14600))
+- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14620](https://github.com/matrix-org/synapse/issues/14620), [\#14621](https://github.com/matrix-org/synapse/issues/14621))
+- Update html templates to load images over HTTPS. Contributed by @ashfame. ([\#14625](https://github.com/matrix-org/synapse/issues/14625))
+- Fix a long-standing bug where the user directory would return 1 more row than requested. ([\#14631](https://github.com/matrix-org/synapse/issues/14631))
+- Reject invalid read receipt requests with empty room or event IDs. Contributed by Nick @ Beeper (@fizzadar). ([\#14632](https://github.com/matrix-org/synapse/issues/14632))
+- Fix a bug introduced in Synapse 1.67.0 where not specifying a config file or a server URL would lead to the `register_new_matrix_user` script failing. ([\#14637](https://github.com/matrix-org/synapse/issues/14637))
+- Fix a long-standing bug where the user directory and room/user stats might be out of sync. ([\#14639](https://github.com/matrix-org/synapse/issues/14639), [\#14643](https://github.com/matrix-org/synapse/issues/14643))
+- Fix a bug introduced in Synapse 1.72.0 where the background updates to add non-thread unique indexes on receipts would fail if they were previously interrupted. ([\#14650](https://github.com/matrix-org/synapse/issues/14650))
+- Improve validation of field size limits in events. ([\#14664](https://github.com/matrix-org/synapse/issues/14664))
+- Fix bugs introduced in Synapse 1.55.0 and 1.69.0 where application services would not be notified of events in the correct rooms, due to stale caches. ([\#14670](https://github.com/matrix-org/synapse/issues/14670))
+
+
+Improved Documentation
+----------------------
+
+- Update worker settings for `pusher` and `federation_sender` functionality. ([\#14493](https://github.com/matrix-org/synapse/issues/14493))
+- Add links to third party package repositories, and point to the bug which highlights Ubuntu's out-of-date packages. ([\#14517](https://github.com/matrix-org/synapse/issues/14517))
+- Remove old, incorrect minimum postgres version note and replace with a link to the [Dependency Deprecation Policy](https://matrix-org.github.io/synapse/v1.73/deprecation_policy.html). ([\#14590](https://github.com/matrix-org/synapse/issues/14590))
+- Add Single-Sign On setup instructions for Mastodon-based instances. ([\#14594](https://github.com/matrix-org/synapse/issues/14594))
+- Change `turn_allow_guests` example value to lowercase `true`. ([\#14634](https://github.com/matrix-org/synapse/issues/14634))
+
+
+Internal Changes
+----------------
+
+- Optimise push badge count calculations. Contributed by Nick @ Beeper (@fizzadar). ([\#14255](https://github.com/matrix-org/synapse/issues/14255))
+- Faster remote room joins: stream the un-partial-stating of rooms over replication. ([\#14473](https://github.com/matrix-org/synapse/issues/14473), [\#14474](https://github.com/matrix-org/synapse/issues/14474))
+- Share the `ClientRestResource` for both workers and the main process. ([\#14528](https://github.com/matrix-org/synapse/issues/14528))
+- Add `--editable` flag to `complement.sh` which uses an editable install of Synapse for faster turn-around times whilst developing iteratively. ([\#14548](https://github.com/matrix-org/synapse/issues/14548))
+- Faster joins: use servers list approximation to send read receipts when in partial state instead of waiting for the full state of the room. ([\#14549](https://github.com/matrix-org/synapse/issues/14549))
+- Modernize unit tests configuration related to workers. ([\#14568](https://github.com/matrix-org/synapse/issues/14568))
+- Bump jsonschema from 4.17.0 to 4.17.3. ([\#14591](https://github.com/matrix-org/synapse/issues/14591))
+- Fix Rust lint CI. ([\#14602](https://github.com/matrix-org/synapse/issues/14602))
+- Bump JasonEtco/create-an-issue from 2.5.0 to 2.8.1. ([\#14607](https://github.com/matrix-org/synapse/issues/14607))
+- Alter some unit test environment parameters to decrease time spent running tests. ([\#14610](https://github.com/matrix-org/synapse/issues/14610))
+- Switch to Go recommended installation method for `gotestfmt` template in CI. ([\#14611](https://github.com/matrix-org/synapse/issues/14611))
+- Bump phonenumbers from 8.13.0 to 8.13.1. ([\#14612](https://github.com/matrix-org/synapse/issues/14612))
+- Bump types-setuptools from 65.5.0.3 to 65.6.0.1. ([\#14613](https://github.com/matrix-org/synapse/issues/14613))
+- Bump twine from 4.0.1 to 4.0.2. ([\#14614](https://github.com/matrix-org/synapse/issues/14614))
+- Bump types-requests from 2.28.11.2 to 2.28.11.5. ([\#14615](https://github.com/matrix-org/synapse/issues/14615))
+- Bump cryptography from 38.0.3 to 38.0.4. ([\#14616](https://github.com/matrix-org/synapse/issues/14616))
+- Remove useless cargo install with apt from Dockerfile. ([\#14636](https://github.com/matrix-org/synapse/issues/14636))
+- Bump certifi from 2021.10.8 to 2022.12.7. ([\#14645](https://github.com/matrix-org/synapse/issues/14645))
+- Bump flake8-bugbear from 22.10.27 to 22.12.6. ([\#14656](https://github.com/matrix-org/synapse/issues/14656))
+- Bump packaging from 21.3 to 22.0. ([\#14657](https://github.com/matrix-org/synapse/issues/14657))
+- Bump types-pillow from 9.3.0.1 to 9.3.0.4. ([\#14658](https://github.com/matrix-org/synapse/issues/14658))
+- Bump serde from 1.0.148 to 1.0.150. ([\#14659](https://github.com/matrix-org/synapse/issues/14659))
+- Bump phonenumbers from 8.13.1 to 8.13.2. ([\#14660](https://github.com/matrix-org/synapse/issues/14660))
+- Bump authlib from 1.1.0 to 1.2.0. ([\#14661](https://github.com/matrix-org/synapse/issues/14661))
+- Move `StateFilter` to `synapse.types`. ([\#14668](https://github.com/matrix-org/synapse/issues/14668))
+- Improve type hints. ([\#14597](https://github.com/matrix-org/synapse/issues/14597), [\#14646](https://github.com/matrix-org/synapse/issues/14646), [\#14671](https://github.com/matrix-org/synapse/issues/14671))
+
+
+Synapse 1.73.0 (2022-12-06)
+===========================
+
+Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.
+
+No significant changes since 1.73.0rc2.
+
+
+Synapse 1.73.0rc2 (2022-12-01)
+==============================
+
+Bugfixes
+--------
+
+- Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. ([\#14582](https://github.com/matrix-org/synapse/issues/14582))
+
+
+Synapse 1.73.0rc1 (2022-11-29)
+==============================
+
+Features
+--------
+
+- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
+- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
+- Add support for handling avatar in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
+- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
+- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
+- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
+- Prune user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
+- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
+- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
+- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
+- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
+- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
+- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))
+
+
+Improved Documentation
+----------------------
+
+- Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))
+
+
+Internal Changes
+----------------
+
+- Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452)).
+- Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
+- Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
+- Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496), [\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
+- Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
+- Faster joins: filter out non local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
+- Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
+- Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
+- Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
+- Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
+- `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
+- Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
+- Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
+- Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
+- Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
+- Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
+- Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575))
+- Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)).
+
+
+Synapse 1.72.0 (2022-11-22)
+===========================
+
+Please note that Synapse now only supports PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life, c.f. our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md).
+
+Bugfixes
+--------
+
+- Update forgotten references to legacy metrics in the included Grafana dashboard. ([\#14477](https://github.com/matrix-org/synapse/issues/14477))
+
+
+Synapse 1.72.0rc1 (2022-11-16)
+==============================
+
+Features
+--------
+
+- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260))
+- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396))
+- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405))
+- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442))
+
+
+Bugfixes
+--------
+
+- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292))
+- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347))
+- Fix a bug introduced in 1.66 which would not send certain pushrules to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356))
+- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361))
+- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364))
+- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369))
+- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374))
+- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409))
+- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
+- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))
+
+
+Updates to the Docker image
+---------------------------
+
+- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
+- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))
+
+
+Improved Documentation
+----------------------
+
+- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
+- Add additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
+- Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
+- Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
+- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
+- Add override ability to `complement.sh` command line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
+- Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
+- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
+- Use a maintained Github action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
+- Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
+- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
+- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
+- Clean-up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
+- Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417))
+- Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
+- Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
+- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
+- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))
+
+
+Synapse 1.71.0 (2022-11-08)
+===========================
+
+Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
+They will be removed altogether in Synapse 1.73.0.
+If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
+
+**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.
+
+No significant changes since 1.71.0rc2.
+
+
+Synapse 1.71.0rc2 (2022-11-04)
+==============================
+
+Improved Documentation
+----------------------
+
+- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))
+
+
+Deprecations and Removals
+-------------------------
+
+- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+
+
+Synapse 1.71.0rc1 (2022-11-01)
+==============================
+
+Features
+--------
+
+- Support back-channel logouts from OpenID Connect providers (a configuration sketch follows this list). ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
+- Allow use of Postgres and SQLite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
+- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), Pushrules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
+- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
+- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
+- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205))
+- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))
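+
+For the new back-channel logout support, logout notifications are opted into per provider. A sketch, assuming the `backchannel_logout_enabled` flag added for this feature; all other values are placeholders:
+
+```yaml
+# homeserver.yaml: enable OIDC back-channel logout for one provider.
+oidc_providers:
+  - idp_id: example
+    issuer: "https://idp.example.com/"
+    client_id: "synapse"
+    client_secret: "REPLACE_ME"
+    backchannel_logout_enabled: true
+```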
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
+- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
+- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
+- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
+- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
+- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
+- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
+- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false` (see the sketch after this list). ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
+- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
+- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))
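+
+For reference, the device-name behaviour fixed above is controlled by a single boolean. A sketch, assuming the documented `allow_device_name_lookup_over_federation` option:
+
+```yaml
+# homeserver.yaml: keep device display names private to this homeserver.
+allow_device_name_lookup_over_federation: false
+```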
+
+
+Improved Documentation
+----------------------
+
+- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999))
+- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086))
+- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110))
+- Update the docstrings of `SynapseError` and `FederationError` to better describe what they are used for and what the effects of using them are. ([\#14191](https://github.com/matrix-org/synapse/issues/14191))
+
+
+Internal Changes
+----------------
+
+- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595))
+- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844))
+- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910))
+- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084))
+- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096))
+- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228))
+- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229))
+- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230))
+- Build wheels on macOS 11, not macOS 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249))
+- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268))
+- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287))
+- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303))
+- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305))
+
+
+Dependency updates
+------------------
+
+Runtime:
+
+- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278))
+- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271))
+- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274))
+- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331))
+- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270))
+- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330))
+- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277))
+- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279))
+
+Tooling and CI:
+
+- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328))
+- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042))
+- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276))
+- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275))
+- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273))
+- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332))
+- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133))
+- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272))
+
+
+Synapse 1.70.1 (2022-10-28)
+===========================
+
+This release fixes some regressions that were discovered in 1.70.0.
+
+[#14300](https://github.com/matrix-org/synapse/issues/14300)
+was previously reported to be a regression in 1.70.0 as well. However, we have
+since concluded that it was limited to the reporter and thus have not needed
+to include any fix for it in 1.70.1.
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301))
+- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314))
+
+
+Synapse 1.70.0 (2022-10-26)
+===========================
+
+No significant changes since 1.70.0rc2.
+
+
+Synapse 1.70.0rc2 (2022-10-25)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248))
+- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280))
+- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283))
+
+
+Internal Changes
+----------------
+
+- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253))
+- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259))
+
+
+Synapse 1.70.0rc1 (2022-10-19)
==============================
+
+Features
+--------
+
+- Support for [MSC3856](https://github.com/matrix-org/matrix-spec-proposals/pull/3856): threads list API. ([\#13394](https://github.com/matrix-org/synapse/issues/13394), [\#14171](https://github.com/matrix-org/synapse/issues/14171), [\#14175](https://github.com/matrix-org/synapse/issues/14175))
+- Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)). ([\#13776](https://github.com/matrix-org/synapse/issues/13776), [\#13824](https://github.com/matrix-org/synapse/issues/13824), [\#13877](https://github.com/matrix-org/synapse/issues/13877), [\#13878](https://github.com/matrix-org/synapse/issues/13878), [\#14050](https://github.com/matrix-org/synapse/issues/14050), [\#14140](https://github.com/matrix-org/synapse/issues/14140), [\#14159](https://github.com/matrix-org/synapse/issues/14159), [\#14163](https://github.com/matrix-org/synapse/issues/14163), [\#14174](https://github.com/matrix-org/synapse/issues/14174), [\#14222](https://github.com/matrix-org/synapse/issues/14222))
+- Stop fetching missing `prev_events` after we already know their signature is invalid. ([\#13816](https://github.com/matrix-org/synapse/issues/13816))
+- Send application service access tokens as a header (and query parameter). Implements [MSC2832](https://github.com/matrix-org/matrix-spec-proposals/pull/2832). ([\#13996](https://github.com/matrix-org/synapse/issues/13996))
+- Ignore server ACL changes when generating pushes. Implements [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786). ([\#13997](https://github.com/matrix-org/synapse/issues/13997))
+- Experimental support for redirecting to an implementation of a [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) HTTP rendezvous service. ([\#14018](https://github.com/matrix-org/synapse/issues/14018))
+- The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
+- Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
+- Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehydration](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
+- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
+- Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
+- Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))
+
+
+Bugfixes
+--------
+
+- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
+- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
+- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
+- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
+- Fix a bug introduced in Synapse 1.35.0 where errors parsing a `/send_join` or `/state` response would produce excessive, low-quality Sentry events. ([\#14065](https://github.com/matrix-org/synapse/issues/14065))
+- Fix a long-standing bug where Synapse would error on the optional 'invite_room_state' field not being provided to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14083](https://github.com/matrix-org/synapse/issues/14083))
+- Fix a bug where invalid oEmbed fields would cause the entire response to be discarded. Introduced in Synapse 1.18.0. ([\#14089](https://github.com/matrix-org/synapse/issues/14089))
+- Fix a bug introduced in Synapse 1.37.0 in which an incorrect key name was used for sending and receiving room metadata when knocking on a room. ([\#14102](https://github.com/matrix-org/synapse/issues/14102))
+- Fix a bug introduced in v1.69.0rc1 where the joined hosts for a given event were not being properly cached. ([\#14125](https://github.com/matrix-org/synapse/issues/14125))
+- Fix a bug introduced in Synapse 1.30.0 where purging and rejoining a room without restarting in-between would result in a broken room. ([\#14161](https://github.com/matrix-org/synapse/issues/14161), [\#14164](https://github.com/matrix-org/synapse/issues/14164))
+- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint returning potentially inaccurate closest events with `outliers` present. ([\#14215](https://github.com/matrix-org/synapse/issues/14215))
+
+
+Updates to the Docker image
+---------------------------
+
+- Update the version of frozendict in Docker images and Debian packages from 2.3.3 to 2.3.4, which may fix memory leak problems. ([\#13955](https://github.com/matrix-org/synapse/issues/13955))
+- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
+- Prevent a class of database sharding errors when using `Dockerfile-workers` to spawn multiple instances of the same worker. Contributed by Jason Little. ([\#14165](https://github.com/matrix-org/synapse/issues/14165))
+- Set `LD_PRELOAD` to use jemalloc memory allocator in Dockerfile-workers. ([\#14182](https://github.com/matrix-org/synapse/issues/14182))
+- Fix pre-startup logging being lost when using the `Dockerfile-workers` image. ([\#14195](https://github.com/matrix-org/synapse/issues/14195))
+
+
+Improved Documentation
+----------------------
+
+- Add sample worker files for `pusher` and `federation_sender`. ([\#14077](https://github.com/matrix-org/synapse/issues/14077))
+- Improve the listener example on the metrics documentation. ([\#14078](https://github.com/matrix-org/synapse/issues/14078))
+- Expand Google OpenID Connect example config to map email attribute. Contributed by @ptman. ([\#14081](https://github.com/matrix-org/synapse/issues/14081))
+- Document that ending a changelog entry in a full stop or exclamation mark is not optional. ([\#14087](https://github.com/matrix-org/synapse/issues/14087))
+- Fix links to jemalloc documentation, which were broken in [#13491](https://github.com/matrix-org/synapse/pull/13491). ([\#14093](https://github.com/matrix-org/synapse/issues/14093))
+- Remove an unneeded `replication` listener from the Docker Compose example. ([\#14107](https://github.com/matrix-org/synapse/issues/14107))
+- Fix name of `alias_creation_rules` option in the config manual documentation. ([\#14124](https://github.com/matrix-org/synapse/issues/14124))
+- Clarify comment on event contexts. ([\#14145](https://github.com/matrix-org/synapse/issues/14145))
+- Fix dead link to the [Admin Registration API](https://matrix-org.github.io/synapse/latest/admin_api/register_api.html). ([\#14189](https://github.com/matrix-org/synapse/issues/14189))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the experimental implementation of [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772). ([\#14094](https://github.com/matrix-org/synapse/issues/14094))
+- Remove the unstable identifier for [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#14106](https://github.com/matrix-org/synapse/issues/14106), [\#14146](https://github.com/matrix-org/synapse/issues/14146))
+
+
+Internal Changes
+----------------
+
+- Optimise queries used to get a user's rooms during sync. Contributed by Nick @ Beeper (@fizzadar). ([\#13991](https://github.com/matrix-org/synapse/issues/13991))
+- Update authlib from 0.15.5 to 1.1.0. ([\#14006](https://github.com/matrix-org/synapse/issues/14006))
+- Make `parse_server_name` consistent in handling invalid server names. ([\#14007](https://github.com/matrix-org/synapse/issues/14007))
+- Don't repeatedly wake up the same users for batched events. ([\#14033](https://github.com/matrix-org/synapse/issues/14033))
+- Complement test image: capture logs from nginx. ([\#14063](https://github.com/matrix-org/synapse/issues/14063))
+- Don't create noisy Sentry events when a requester drops connection to the metrics server mid-request. ([\#14072](https://github.com/matrix-org/synapse/issues/14072))
+- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14092](https://github.com/matrix-org/synapse/issues/14092))
+- Add debug logs to figure out why an event was filtered out of the client response. ([\#14095](https://github.com/matrix-org/synapse/issues/14095))
+- Indicate what endpoint came back with a JSON response we were unable to parse. ([\#14097](https://github.com/matrix-org/synapse/issues/14097))
+- Break up calls to fetch rooms for many users. Contributed by Nick @ Beeper (@fizzadar). ([\#14109](https://github.com/matrix-org/synapse/issues/14109))
+- Faster joins: prioritise the server we joined by when restarting a partial join resync. ([\#14126](https://github.com/matrix-org/synapse/issues/14126))
+- Cache Rust builds when building Docker images. ([\#14130](https://github.com/matrix-org/synapse/issues/14130))
+- Enable dependabot for Rust dependencies. ([\#14132](https://github.com/matrix-org/synapse/issues/14132))
+- Bump typing-extensions from 4.1.1 to 4.4.0. ([\#14134](https://github.com/matrix-org/synapse/issues/14134))
+- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
+- Remove unused configuration code. ([\#14142](https://github.com/matrix-org/synapse/issues/14142))
+- Prepare for the [`gotestfmt` repository move](https://github.com/GoTestTools/gotestfmt/discussions/46). ([\#14144](https://github.com/matrix-org/synapse/issues/14144))
+- Invalidate rooms for user caches on replicated event, fix sync cache race in synapse workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14155](https://github.com/matrix-org/synapse/issues/14155))
+- Enable URL previews when testing with Complement. ([\#14198](https://github.com/matrix-org/synapse/issues/14198))
+- When authenticating batched events, check for auth events in batch as well as DB. ([\#14214](https://github.com/matrix-org/synapse/issues/14214))
+- Update CI config to avoid GitHub Actions deprecation warnings. ([\#14216](https://github.com/matrix-org/synapse/issues/14216), [\#14224](https://github.com/matrix-org/synapse/issues/14224))
+- Update dependency requirements to allow building with poetry-core 1.3.2. ([\#14217](https://github.com/matrix-org/synapse/issues/14217))
+- Rename the `cache_memory` extra to `cache-memory`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14221](https://github.com/matrix-org/synapse/issues/14221))
+- Specify dev-dependencies using lower bounds, to reduce the likelihood of a dependabot merge conflict. The lockfile continues to pin to specific versions. ([\#14227](https://github.com/matrix-org/synapse/issues/14227))
+
+
+Synapse 1.69.0 (2022-10-17)
+===========================
+
Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
Server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
+
+No significant changes since 1.69.0rc4.
+
+
+Synapse 1.69.0rc4 (2022-10-14)
+==============================
+
+Bugfixes
+--------
+
+- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))
+
+
+Synapse 1.69.0rc3 (2022-10-12)
+==============================
+
+Bugfixes
+--------
+
+- Fix an issue with Docker images causing the Rust dependencies to not be pinned correctly. Introduced in v1.68.0. ([\#14129](https://github.com/matrix-org/synapse/issues/14129))
+- Fix a bug introduced in Synapse 1.69.0rc1 which would cause registration replication requests to fail if the worker sending the request is not running Synapse 1.69. ([\#14135](https://github.com/matrix-org/synapse/issues/14135))
+- Fix error in background update when rotating existing notifications. Introduced in v1.69.0rc2. ([\#14138](https://github.com/matrix-org/synapse/issues/14138))
+
+
+Internal Changes
+----------------
+
+- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085))
+
+
+Synapse 1.69.0rc2 (2022-10-06)
+==============================
+
Deprecations and Removals
-------------------------
diff --git a/Cargo.lock b/Cargo.lock
index b952b6b4c01d..6e97fb8fb10f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.65"
+version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
+checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6"
[[package]]
name = "arc-swap"
@@ -37,9 +37,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "blake2"
-version = "0.10.4"
+version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388"
+checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e"
dependencies = [
"digest",
]
@@ -104,9 +104,9 @@ checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
[[package]]
name = "itoa"
-version = "1.0.3"
+version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754"
+checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc"
[[package]]
name = "lazy_static"
@@ -116,15 +116,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
-version = "0.2.132"
+version = "0.2.135"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5"
+checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c"
[[package]]
name = "lock_api"
-version = "0.4.7"
+version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53"
+checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
@@ -156,9 +156,9 @@ dependencies = [
[[package]]
name = "once_cell"
-version = "1.13.1"
+version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e"
+checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1"
[[package]]
name = "parking_lot"
@@ -185,18 +185,18 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.43"
+version = "1.0.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab"
+checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
-version = "0.17.1"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12f72538a0230791398a0986a6518ebd88abc3fded89007b506ed072acc831e1"
+checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543"
dependencies = [
"anyhow",
"cfg-if",
@@ -212,9 +212,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
-version = "0.17.1"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc4cf18c20f4f09995f3554e6bcf9b09bd5e4d6b67c562fdfaafa644526ba479"
+checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8"
dependencies = [
"once_cell",
"target-lexicon",
@@ -222,9 +222,9 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
-version = "0.17.1"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a41877f28d8ebd600b6aa21a17b40c3b0fc4dfe73a27b6e81ab3d895e401b0e9"
+checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc"
dependencies = [
"libc",
"pyo3-build-config",
@@ -243,9 +243,9 @@ dependencies = [
[[package]]
name = "pyo3-macros"
-version = "0.17.1"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e81c8d4bcc2f216dc1b665412df35e46d12ee8d3d046b381aad05f1fcf30547"
+checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@@ -255,9 +255,9 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
-version = "0.17.1"
+version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85752a767ee19399a78272cc2ab625cd7d373b2e112b4b13db28de71fa892784"
+checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
"proc-macro2",
"quote",
@@ -294,9 +294,9 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.6.0"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
+checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
dependencies = [
"aho-corasick",
"memchr",
@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.145"
+version = "1.0.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b"
+checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.145"
+version = "1.0.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c"
+checksum = "42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e"
dependencies = [
"proc-macro2",
"quote",
@@ -343,9 +343,9 @@ dependencies = [
[[package]]
name = "serde_json"
-version = "1.0.85"
+version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44"
+checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
dependencies = [
"itoa",
"ryu",
@@ -354,9 +354,9 @@ dependencies = [
[[package]]
name = "smallvec"
-version = "1.9.0"
+version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
+checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
[[package]]
name = "subtle"
@@ -366,9 +366,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
-version = "1.0.99"
+version = "1.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "58dbef6ec655055e20b86b15a8cc6d439cca19b667537ac6a1369572d151ab13"
+checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
dependencies = [
"proc-macro2",
"quote",
@@ -406,9 +406,9 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "unicode-ident"
-version = "1.0.3"
+version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4f5b37a154999a8f3f98cc23a628d850e154479cd94decf3414696e12e31aaf"
+checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
[[package]]
name = "unindent"
diff --git a/changelog.d/13776.feature b/changelog.d/13776.feature
deleted file mode 100644
index 5d0ae16e131b..000000000000
--- a/changelog.d/13776.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)).
diff --git a/changelog.d/13824.feature b/changelog.d/13824.feature
deleted file mode 100644
index 5d0ae16e131b..000000000000
--- a/changelog.d/13824.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)).
diff --git a/changelog.d/13877.feature b/changelog.d/13877.feature
deleted file mode 100644
index 5d0ae16e131b..000000000000
--- a/changelog.d/13877.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)).
diff --git a/changelog.d/13878.feature b/changelog.d/13878.feature
deleted file mode 100644
index 5d0ae16e131b..000000000000
--- a/changelog.d/13878.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)).
diff --git a/changelog.d/13955.docker b/changelog.d/13955.docker
deleted file mode 100644
index 043b5690a380..000000000000
--- a/changelog.d/13955.docker
+++ /dev/null
@@ -1 +0,0 @@
-Update the version of frozendict in Docker images and Debian packages from 2.3.3 to 2.3.4, which may fix memory leak problems.
diff --git a/changelog.d/13983.misc b/changelog.d/13983.misc
deleted file mode 100644
index 69ed75be90fe..000000000000
--- a/changelog.d/13983.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump actions/setup-python from 2 to 4.
diff --git a/changelog.d/13984.misc b/changelog.d/13984.misc
deleted file mode 100644
index 120d042fade1..000000000000
--- a/changelog.d/13984.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump types-pyopenssl from 22.0.0 to 22.0.10.
diff --git a/changelog.d/13985.misc b/changelog.d/13985.misc
deleted file mode 100644
index 48e896072002..000000000000
--- a/changelog.d/13985.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump jsonschema from 4.4.0 to 4.16.0.
diff --git a/changelog.d/13986.misc b/changelog.d/13986.misc
deleted file mode 100644
index f71aed474b05..000000000000
--- a/changelog.d/13986.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump types-setuptools from 57.4.9 to 65.4.0.0.
diff --git a/changelog.d/13987.misc b/changelog.d/13987.misc
deleted file mode 100644
index b1164201bef9..000000000000
--- a/changelog.d/13987.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump types-requests from 2.27.11 to 2.28.11.
diff --git a/changelog.d/13988.misc b/changelog.d/13988.misc
deleted file mode 100644
index e8c6b4e9b1b4..000000000000
--- a/changelog.d/13988.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump isort from 5.7.0 to 5.10.1.
diff --git a/changelog.d/13991.misc b/changelog.d/13991.misc
deleted file mode 100644
index f425fb17b222..000000000000
--- a/changelog.d/13991.misc
+++ /dev/null
@@ -1 +0,0 @@
-Optimise queries used to get a users rooms during sync. Contributed by Nick @ Beeper (@fizzadar).
diff --git a/changelog.d/13996.feature b/changelog.d/13996.feature
deleted file mode 100644
index 771f1c97a381..000000000000
--- a/changelog.d/13996.feature
+++ /dev/null
@@ -1 +0,0 @@
-Send application service access tokens as a header (and query parameter). Implement [MSC2832](https://github.com/matrix-org/matrix-spec-proposals/pull/2832).
diff --git a/changelog.d/13997.feature b/changelog.d/13997.feature
deleted file mode 100644
index 23f7ed106fc9..000000000000
--- a/changelog.d/13997.feature
+++ /dev/null
@@ -1 +0,0 @@
-Ignore server ACL changes when generating pushes. Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786).
diff --git a/changelog.d/14006.misc b/changelog.d/14006.misc
deleted file mode 100644
index c06dcadf0279..000000000000
--- a/changelog.d/14006.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update authlib from 0.15.5 to 1.1.0.
diff --git a/changelog.d/14032.feature b/changelog.d/14032.feature
deleted file mode 100644
index bb221d3ca605..000000000000
--- a/changelog.d/14032.feature
+++ /dev/null
@@ -1 +0,0 @@
-Advertise Matrix 1.3 support on `/_matrix/client/versions`.
diff --git a/changelog.d/14033.misc b/changelog.d/14033.misc
deleted file mode 100644
index fe42852aa57c..000000000000
--- a/changelog.d/14033.misc
+++ /dev/null
@@ -1 +0,0 @@
-Don't repeatedly wake up the same users for batched events.
\ No newline at end of file
diff --git a/changelog.d/14040.misc b/changelog.d/14040.misc
deleted file mode 100644
index 1a4d3146b567..000000000000
--- a/changelog.d/14040.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump msgpack from 1.0.3 to 1.0.4.
diff --git a/changelog.d/14041.misc b/changelog.d/14041.misc
deleted file mode 100644
index a2119627f894..000000000000
--- a/changelog.d/14041.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump types-pyyaml from 6.0.4 to 6.0.12.
diff --git a/changelog.d/14046.misc b/changelog.d/14046.misc
deleted file mode 100644
index 69ed75be90fe..000000000000
--- a/changelog.d/14046.misc
+++ /dev/null
@@ -1 +0,0 @@
-Bump actions/setup-python from 2 to 4.
diff --git a/changelog.d/14050.feature b/changelog.d/14050.feature
deleted file mode 100644
index 5d0ae16e131b..000000000000
--- a/changelog.d/14050.feature
+++ /dev/null
@@ -1 +0,0 @@
-Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)).
diff --git a/changelog.d/14053.bugfix b/changelog.d/14053.bugfix
deleted file mode 100644
index 07769f51d0ee..000000000000
--- a/changelog.d/14053.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`.
diff --git a/changelog.d/14054.feature b/changelog.d/14054.feature
deleted file mode 100644
index 9cf3f7a557d4..000000000000
--- a/changelog.d/14054.feature
+++ /dev/null
@@ -1 +0,0 @@
-Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehyrdation](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints.
diff --git a/changelog.d/14063.misc b/changelog.d/14063.misc
deleted file mode 100644
index f0d1e47f1a0c..000000000000
--- a/changelog.d/14063.misc
+++ /dev/null
@@ -1 +0,0 @@
-Complement test image: capture logs from nginx.
diff --git a/changelog.d/14065.misc b/changelog.d/14065.misc
deleted file mode 100644
index 98998b0015fb..000000000000
--- a/changelog.d/14065.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.35.0 where errors parsing a `/send_join` or `/state` response would produce excessive, low-quality Sentry events.
diff --git a/changelog.d/14072.misc b/changelog.d/14072.misc
deleted file mode 100644
index 3070c756d534..000000000000
--- a/changelog.d/14072.misc
+++ /dev/null
@@ -1 +0,0 @@
-Don't create noisy Sentry events when a requester drops connection to the metrics server mid-request.
diff --git a/changelog.d/14077.doc b/changelog.d/14077.doc
deleted file mode 100644
index 7853e7cd5e72..000000000000
--- a/changelog.d/14077.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add sample worker files for `pusher` and `federation_sender`.
diff --git a/changelog.d/14078.doc b/changelog.d/14078.doc
deleted file mode 100644
index cbe3c8b3814e..000000000000
--- a/changelog.d/14078.doc
+++ /dev/null
@@ -1 +0,0 @@
-Improve the listener example on the metrics documentation.
diff --git a/changelog.d/14087.doc b/changelog.d/14087.doc
deleted file mode 100644
index 28d1ce67c5a3..000000000000
--- a/changelog.d/14087.doc
+++ /dev/null
@@ -1 +0,0 @@
-The changelog entry ending in a full stop or exclamation mark is not optional.
diff --git a/changelog.d/14089.bugfix b/changelog.d/14089.bugfix
deleted file mode 100644
index 4a398921bb12..000000000000
--- a/changelog.d/14089.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug where invalid oEmbed fields would cause the entire response to be discarded. Introduced in Synapse 1.18.0.
diff --git a/changelog.d/14093.doc b/changelog.d/14093.doc
deleted file mode 100644
index cb1ff9490ebd..000000000000
--- a/changelog.d/14093.doc
+++ /dev/null
@@ -1 +0,0 @@
-Fix links to jemalloc documentation, which were broken in #13491.
diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md
index 4dbfee28531e..bdd3dd32e07f 100644
--- a/contrib/docker_compose_workers/README.md
+++ b/contrib/docker_compose_workers/README.md
@@ -94,20 +94,6 @@ worker_replication_host: synapse
worker_replication_http_port: 9093
```
-### Add Workers to `instance_map`
-
-Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:
-
-```yaml
-instance_map:
- synapse-generic-worker-1: # The worker_name setting in your worker configuration file
- host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
- port: 8034 # The port assigned to the replication listener in your worker config file
- synapse-federation-sender-1:
- host: synapse-federation-sender-1
- port: 8034
-```
-
### Configure Federation Senders
This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
@@ -122,4 +108,4 @@ federation_sender_instances:
## Other Worker types
-Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
\ No newline at end of file
+Using the concepts shown here, it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
diff --git a/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
index 5ba42a92d2e6..5b40664d67ed 100644
--- a/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
+++ b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
@@ -5,10 +5,4 @@ worker_name: synapse-federation-sender-1
worker_replication_host: synapse
worker_replication_http_port: 9093
-worker_listeners:
- - type: http
- port: 8034
- resources:
- - names: [replication]
-
worker_log_config: /data/federation_sender.log.config
diff --git a/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
index 694584105a3c..09e55df9f390 100644
--- a/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
+++ b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml
@@ -6,10 +6,6 @@ worker_replication_host: synapse
worker_replication_http_port: 9093
worker_listeners:
- - type: http
- port: 8034
- resources:
- - names: [replication]
- type: http
port: 8081
x_forwarded: true
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
index 58061e2fced8..68705b6e6d26 100644
--- a/contrib/grafana/synapse.json
+++ b/contrib/grafana/synapse.json
@@ -15,7 +15,7 @@
"type": "grafana",
"id": "grafana",
"name": "Grafana",
- "version": "9.0.4"
+ "version": "9.2.2"
},
{
"type": "panel",
@@ -120,6 +120,21 @@
"datasource": {
"uid": "$datasource"
},
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 9,
"w": 12,
@@ -134,6 +149,45 @@
"show": false
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": -1,
+ "cellRadius": 0,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Inferno",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": false
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false,
+ "unit": "s"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
@@ -208,7 +262,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -439,7 +493,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -549,7 +603,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -658,7 +712,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -798,7 +852,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -919,6 +973,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -952,7 +1008,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -977,7 +1034,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "single",
@@ -1096,7 +1154,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1202,7 +1260,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1307,7 +1365,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1398,7 +1456,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1480,6 +1538,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
@@ -1515,7 +1574,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1528,16 +1587,20 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_http_client_requests{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "editorMode": "code",
+ "expr": "rate(synapse_http_client_requests_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"legendFormat": "{{job}}-{{index}} {{method}}",
+ "range": true,
"refId": "A"
},
{
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_http_matrixfederationclient_requests{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "editorMode": "code",
+ "expr": "rate(synapse_http_matrixfederationclient_requests_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"legendFormat": "{{job}}-{{index}} {{method}} (federation)",
+ "range": true,
"refId": "B"
}
],
@@ -1557,11 +1620,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:123",
"format": "reqps",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:124",
"format": "short",
"logBase": 1,
"show": true
@@ -1582,6 +1647,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "active threads",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -1614,7 +1681,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1636,7 +1704,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "single",
@@ -1702,11 +1771,26 @@
"datasource": {
"uid": "$datasource"
},
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 28
+ "y": 56
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -1716,6 +1800,45 @@
"show": false
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": -1,
+ "cellRadius": 0,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Inferno",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": false
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "reverse": false,
+ "unit": "s"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
@@ -1769,7 +1892,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 28
+ "y": 56
},
"hiddenSeries": false,
"id": 33,
@@ -1791,7 +1914,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1861,7 +1984,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 37
+ "y": 65
},
"hiddenSeries": false,
"id": 40,
@@ -1882,7 +2005,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1949,7 +2072,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 37
+ "y": 65
},
"hiddenSeries": false,
"id": 46,
@@ -1970,7 +2093,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2040,7 +2163,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 44
+ "y": 72
},
"hiddenSeries": false,
"id": 44,
@@ -2064,7 +2187,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2122,6 +2245,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"decimals": 1,
@@ -2131,7 +2255,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 44
+ "y": 72
},
"hiddenSeries": false,
"id": 45,
@@ -2155,7 +2279,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2168,10 +2292,12 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "sum(rate(synapse_storage_events_persisted_events_sep{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\", origin_type=\"local\"}[$bucket_size])) by (origin_type, origin_entity)",
+ "editorMode": "code",
+ "expr": "sum(rate(synapse_storage_events_persisted_events_sep_total{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\", origin_type=\"local\"}[$bucket_size])) by (origin_type, origin_entity)",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{origin_entity}} ({{origin_type}})",
+ "range": true,
"refId": "A",
"step": 20
}
@@ -2192,12 +2318,14 @@
},
"yaxes": [
{
+ "$$hashKey": "object:232",
"format": "hertz",
"logBase": 1,
"min": "0",
"show": true
},
{
+ "$$hashKey": "object:233",
"format": "short",
"logBase": 1,
"show": true
@@ -2228,7 +2356,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 51
+ "y": 79
},
"hiddenSeries": false,
"id": 118,
@@ -2250,13 +2378,14 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
"repeatDirection": "h",
"seriesOverrides": [
{
+ "$$hashKey": "object:316",
"alias": "mean",
"linewidth": 2
}
@@ -2317,10 +2446,12 @@
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
+ "editorMode": "code",
"expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} mean",
+ "range": true,
"refId": "E"
}
],
@@ -2368,6 +2499,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -2400,7 +2533,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2416,14 +2550,15 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 51
+ "y": 79
},
"id": 222,
"options": {
"legend": {
"calcs": [],
- "displayMode": "hidden",
- "placement": "bottom"
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
},
"tooltip": {
"mode": "multi",
@@ -2496,6 +2631,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"editable": true,
@@ -2513,7 +2649,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 29
+ "y": 57
},
"hiddenSeries": false,
"id": 4,
@@ -2538,7 +2674,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2551,7 +2687,7 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -2562,6 +2698,7 @@
],
"thresholds": [
{
+ "$$hashKey": "object:234",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(216, 200, 27, 0.27)",
@@ -2570,6 +2707,7 @@
"yaxis": "left"
},
{
+ "$$hashKey": "object:235",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(234, 112, 112, 0.22)",
@@ -2593,11 +2731,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:206",
"format": "hertz",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:207",
"format": "short",
"logBase": 1,
"show": true
@@ -2613,6 +2753,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"editable": true,
@@ -2630,7 +2771,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 29
+ "y": 57
},
"hiddenSeries": false,
"id": 32,
@@ -2651,7 +2792,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2664,7 +2805,7 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})",
+ "expr": "rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{method}} {{servlet}} {{job}}-{{index}}",
@@ -2689,11 +2830,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:305",
"format": "hertz",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:306",
"format": "short",
"logBase": 1,
"show": true
@@ -2709,6 +2852,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"editable": true,
@@ -2726,7 +2870,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 37
+ "y": 65
},
"hiddenSeries": false,
"id": 139,
@@ -2751,7 +2895,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2764,7 +2908,7 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_http_server_in_flight_requests_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "rate(synapse_http_server_in_flight_requests_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2775,6 +2919,7 @@
],
"thresholds": [
{
+ "$$hashKey": "object:135",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(216, 200, 27, 0.27)",
@@ -2783,6 +2928,7 @@
"yaxis": "left"
},
{
+ "$$hashKey": "object:136",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(234, 112, 112, 0.22)",
@@ -2806,11 +2952,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:107",
"format": "percentunit",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:108",
"format": "short",
"logBase": 1,
"show": true
@@ -2826,6 +2974,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"editable": true,
@@ -2843,7 +2992,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 37
+ "y": 65
},
"hiddenSeries": false,
"id": 52,
@@ -2868,7 +3017,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2881,7 +3030,7 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "(rate(synapse_http_server_in_flight_requests_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "(rate(synapse_http_server_in_flight_requests_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2892,6 +3041,7 @@
],
"thresholds": [
{
+ "$$hashKey": "object:417",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(216, 200, 27, 0.27)",
@@ -2900,6 +3050,7 @@
"yaxis": "left"
},
{
+ "$$hashKey": "object:418",
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(234, 112, 112, 0.22)",
@@ -2923,11 +3074,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:389",
"format": "s",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:390",
"format": "short",
"logBase": 1,
"show": true
@@ -2943,6 +3096,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"editable": true,
@@ -2960,7 +3114,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 45
+ "y": 73
},
"hiddenSeries": false,
"id": 7,
@@ -2984,7 +3138,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2997,7 +3151,7 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_http_server_in_flight_requests_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "rate(synapse_http_server_in_flight_requests_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3022,11 +3176,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:488",
"format": "percentunit",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:489",
"format": "short",
"logBase": 1,
"show": true
@@ -3059,7 +3215,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 45
+ "y": 73
},
"hiddenSeries": false,
"id": 47,
@@ -3084,7 +3240,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3157,7 +3313,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 53
+ "y": 81
},
"hiddenSeries": false,
"id": 103,
@@ -3178,7 +3334,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3244,104 +3400,6 @@
"yaxis": {
"align": false
}
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "description": "Average number of hosts being rate limited across each worker type.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "barAlignment": 0,
- "drawStyle": "line",
- "fillOpacity": 0,
- "gradientMode": "none",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineInterpolation": "linear",
- "lineWidth": 1,
- "pointSize": 5,
- "scaleDistribution": {
- "type": "linear"
- },
- "showPoints": "auto",
- "spanNulls": false,
- "stacking": {
- "group": "A",
- "mode": "none"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green"
- }
- ]
- },
- "unit": "none"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 53
- },
- "id": 225,
- "options": {
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom"
- },
- "tooltip": {
- "mode": "single",
- "sort": "desc"
- }
- },
- "targets": [
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "builder",
- "expr": "avg by(job, rate_limiter_name) (synapse_rate_limit_sleep_affected_hosts{instance=\"$instance\", job=~\"$job\", index=~\"$index\"})",
- "hide": false,
- "legendFormat": "Slept by {{job}}:{{rate_limiter_name}}",
- "range": true,
- "refId": "B"
- },
- {
- "datasource": {
- "type": "prometheus",
- "uid": "${DS_PROMETHEUS}"
- },
- "editorMode": "builder",
- "expr": "avg by(job, rate_limiter_name) (synapse_rate_limit_reject_affected_hosts{instance=\"$instance\", job=~\"$job\", index=~\"$index\"})",
- "legendFormat": "Rejected by {{job}}:{{rate_limiter_name}}",
- "range": true,
- "refId": "A"
- }
- ],
- "title": "Hosts being rate limited",
- "type": "timeseries"
}
],
"targets": [
@@ -3691,7 +3749,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 6
+ "y": 59
},
"hiddenSeries": false,
"id": 79,
@@ -3713,7 +3771,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3791,7 +3849,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 6
+ "y": 59
},
"hiddenSeries": false,
"id": 83,
@@ -3813,7 +3871,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3893,7 +3951,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 15
+ "y": 68
},
"hiddenSeries": false,
"id": 109,
@@ -3915,7 +3973,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3982,6 +4040,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
@@ -3996,7 +4055,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 15
+ "y": 68
},
"hiddenSeries": false,
"id": 111,
@@ -4018,7 +4077,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4031,11 +4090,13 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_federation_client_sent_edus_by_type{instance=\"$instance\"}[$bucket_size])",
+ "editorMode": "code",
+ "expr": "rate(synapse_federation_client_sent_edus_by_type_total{instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{type}}",
+ "range": true,
"refId": "A"
}
],
@@ -4055,11 +4116,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:462",
"format": "hertz",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:463",
"format": "short",
"logBase": 1,
"show": true
@@ -4091,7 +4154,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 24
+ "y": 77
},
"hiddenSeries": false,
"id": 142,
@@ -4111,7 +4174,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -4125,9 +4188,11 @@
"type": "prometheus",
"uid": "$datasource"
},
+ "editorMode": "code",
"expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"interval": "",
"legendFormat": "pending PDUs {{job}}-{{index}}",
+ "range": true,
"refId": "A"
},
{
@@ -4157,6 +4222,7 @@
},
"yaxes": [
{
+ "$$hashKey": "object:547",
"format": "short",
"label": "events",
"logBase": 1,
@@ -4164,6 +4230,7 @@
"show": true
},
{
+ "$$hashKey": "object:548",
"format": "short",
"label": "",
"logBase": 1,
@@ -4195,7 +4262,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 24
+ "y": 77
},
"hiddenSeries": false,
"id": 140,
@@ -4217,7 +4284,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4345,11 +4412,26 @@
"datasource": {
"uid": "$datasource"
},
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 32
+ "y": 85
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -4359,6 +4441,48 @@
"show": false
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": -1,
+ "cellValues": {
+ "decimals": 2
+ },
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "min": 0,
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Inferno",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": false
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 0,
+ "reverse": false,
+ "unit": "s"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
@@ -4412,7 +4536,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 33
+ "y": 86
},
"hiddenSeries": false,
"id": 162,
@@ -4435,7 +4559,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4628,7 +4752,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 41
+ "y": 94
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -4694,7 +4818,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 42
+ "y": 95
},
"hiddenSeries": false,
"id": 203,
@@ -4716,7 +4840,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4795,7 +4919,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 50
+ "y": 103
},
"hiddenSeries": false,
"id": 202,
@@ -4817,7 +4941,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4888,7 +5012,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 51
+ "y": 104
},
"hiddenSeries": false,
"id": 205,
@@ -4908,7 +5032,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.0.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -4975,42 +5099,765 @@
},
{
"collapsed": true,
- "datasource": {
- "type": "prometheus",
- "uid": "$datasource"
- },
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 31
},
- "id": 60,
+ "id": 227,
"panels": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": {
- "uid": "$datasource"
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 32
- },
- "hiddenSeries": false,
- "id": 51,
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "id": 239,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum(increase(synapse_rate_limit_reject_total{instance=\"$instance\"}[$bucket_size]))",
+ "refId": "A"
+ }
+ ],
+ "title": "Number of rate limit rejected requests",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 1
+ },
+ "id": 235,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum(increase(synapse_rate_limit_sleep_total{instance=\"$instance\"}[$bucket_size]))",
+ "refId": "A"
+ }
+ ],
+ "title": "Number of requests being slept by the rate limiter",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Why is the data zero (0)? https://github.com/matrix-org/synapse/pull/13541#discussion_r951926322",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "id": 237,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.0.4",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum(increase(synapse_rate_limit_reject_affected_hosts{instance=\"$instance\"}[$bucket_size]))",
+ "refId": "A"
+ }
+ ],
+ "title": "Number of hosts being rejected by the rate limiter",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "https://github.com/matrix-org/synapse/pull/13541",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "id": 233,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.0.4",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum(increase(synapse_rate_limit_sleep_affected_hosts{instance=\"$instance\"}[$bucket_size]))",
+ "refId": "A"
+ }
+ ],
+ "title": "Number of hosts being slept by the rate limiter",
+ "type": "timeseries"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "hiddenSeries": false,
+ "id": 229,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "paceLength": 10,
+ "percentage": false,
+ "pluginVersion": "9.0.4",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "$$hashKey": "object:276",
+ "alias": "Avg",
+ "fill": 0,
+ "linewidth": 3
+ },
+ {
+ "$$hashKey": "object:277",
+ "alias": "99%",
+ "color": "#C4162A",
+ "fillBelowTo": "90%"
+ },
+ {
+ "$$hashKey": "object:278",
+ "alias": "90%",
+ "color": "#FF7383",
+ "fillBelowTo": "75%"
+ },
+ {
+ "$$hashKey": "object:279",
+ "alias": "75%",
+ "color": "#FFEE52",
+ "fillBelowTo": "50%"
+ },
+ {
+ "$$hashKey": "object:280",
+ "alias": "50%",
+ "color": "#73BF69",
+ "fillBelowTo": "25%"
+ },
+ {
+ "$$hashKey": "object:281",
+ "alias": "25%",
+ "color": "#1F60C4",
+ "fillBelowTo": "5%"
+ },
+ {
+ "$$hashKey": "object:282",
+ "alias": "5%",
+ "lines": false
+ },
+ {
+ "$$hashKey": "object:283",
+ "alias": "Average",
+ "color": "rgb(255, 255, 255)",
+ "lines": true,
+ "linewidth": 3
+ },
+ {
+ "$$hashKey": "object:284",
+ "alias": ">99%",
+ "color": "#B877D9",
+ "fill": 3,
+ "lines": true
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.9995, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "legendFormat": ">99%",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "99%",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.9, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "90%",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.75, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "75%",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.5, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "50%",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.25, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "legendFormat": "25%",
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "histogram_quantile(0.05, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "legendFormat": "5%",
+ "refId": "G"
+ },
+ {
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "expr": "sum(rate(synapse_rate_limit_queue_wait_time_seconds_sum{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) / sum(rate(synapse_rate_limit_queue_wait_time_seconds_count{index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+ "legendFormat": "Average",
+ "refId": "H"
+ }
+ ],
+ "thresholds": [
+ {
+ "$$hashKey": "object:283",
+ "colorMode": "warning",
+ "fill": false,
+ "line": true,
+ "op": "gt",
+ "value": 1,
+ "yaxis": "left"
+ },
+ {
+ "$$hashKey": "object:284",
+ "colorMode": "critical",
+ "fill": false,
+ "line": true,
+ "op": "gt",
+ "value": 2,
+ "yaxis": "left"
+ }
+ ],
+ "timeRegions": [],
+ "title": "Rate limit queue wait time Quantiles (all workers)",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:255",
+ "format": "s",
+ "label": "",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:256",
+ "format": "hertz",
+ "label": "",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "line"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ },
+ "unit": "hertz"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Default reject threshold (50 requests within a second)"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.lineWidth",
+ "value": 2
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 17
+ },
+ "id": 231,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "expr": "sum(rate(synapse_rate_limit_sleep_total{instance=\"$instance\"}[$bucket_size]))",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "50",
+ "hide": false,
+ "legendFormat": "Default reject threshold (50 requests within a second)",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Rate of requests being slept by the rate limiter",
+ "type": "timeseries"
+ }
+ ],
+ "title": "Federation rate limiter",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 32
+ },
+ "id": 60,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "uid": "$datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 32
+ },
+ "hiddenSeries": false,
+ "id": 51,
"legend": {
"avg": false,
"current": false,
@@ -5205,7 +6052,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 32
+ "y": 33
},
"id": 219,
"panels": [
@@ -5799,60 +6646,87 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 33
+ "y": 34
},
"id": 58,
"panels": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": {
"uid": "$datasource"
},
"fieldConfig": {
"defaults": {
- "links": []
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
},
"overrides": []
},
- "fill": 1,
- "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 9
+ "y": 35
},
- "hiddenSeries": false,
"id": 48,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [],
- "nullPointMode": "null",
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
},
- "paceLength": 10,
- "percentage": false,
"pluginVersion": "9.0.4",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"datasource": {
@@ -5866,37 +6740,8 @@
"step": 20
}
],
- "thresholds": [],
- "timeRegions": [],
"title": "Avg time waiting for db conn",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "s",
- "label": "",
- "logBase": 1,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "show": false
- }
- ],
- "yaxis": {
- "align": false
- }
+ "type": "timeseries"
},
{
"aliasColors": {},
@@ -5919,7 +6764,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 9
+ "y": 35
},
"hiddenSeries": false,
"id": 104,
@@ -6050,7 +6895,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 16
+ "y": 42
},
"hiddenSeries": false,
"id": 10,
@@ -6150,7 +6995,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 16
+ "y": 42
},
"hiddenSeries": false,
"id": 11,
@@ -6250,7 +7095,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 23
+ "y": 49
},
"hiddenSeries": false,
"id": 180,
@@ -6347,7 +7192,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 23
+ "y": 49
},
"hiddenSeries": false,
"id": 200,
@@ -6475,7 +7320,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 34
+ "y": 35
},
"id": 59,
"panels": [
@@ -7181,7 +8026,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 35
+ "y": 36
},
"id": 61,
"panels": [
@@ -7209,7 +8054,7 @@
"h": 10,
"w": 12,
"x": 0,
- "y": 69
+ "y": 36
},
"hiddenSeries": false,
"id": 1,
@@ -7311,7 +8156,7 @@
"h": 10,
"w": 12,
"x": 12,
- "y": 69
+ "y": 36
},
"hiddenSeries": false,
"id": 8,
@@ -7411,7 +8256,7 @@
"h": 10,
"w": 12,
"x": 0,
- "y": 79
+ "y": 46
},
"hiddenSeries": false,
"id": 38,
@@ -7507,7 +8352,7 @@
"h": 10,
"w": 12,
"x": 12,
- "y": 79
+ "y": 46
},
"hiddenSeries": false,
"id": 39,
@@ -7608,7 +8453,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 89
+ "y": 56
},
"hiddenSeries": false,
"id": 65,
@@ -7705,7 +8550,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 36
+ "y": 37
},
"id": 148,
"panels": [
@@ -7923,7 +8768,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 37
+ "y": 38
},
"id": 62,
"panels": [
@@ -8496,7 +9341,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 38
+ "y": 39
},
"id": 63,
"panels": [
@@ -8506,6 +9351,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
@@ -8520,7 +9366,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 14
+ "y": 40
},
"hiddenSeries": false,
"id": 43,
@@ -8542,7 +9388,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -8555,7 +9401,7 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
+ "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{job}}-{{index}} {{command}}",
@@ -8579,11 +9425,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:89",
"format": "hertz",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:90",
"format": "short",
"logBase": 1,
"show": true
@@ -8653,7 +9501,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 14
+ "y": 40
},
"id": 41,
"links": [],
@@ -8661,7 +9509,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "single",
@@ -8676,7 +9525,7 @@
"uid": "${DS_PROMETHEUS}"
},
"exemplar": true,
- "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "expr": "rate(synapse_replication_tcp_resource_stream_updates_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -8749,7 +9598,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 21
+ "y": 47
},
"id": 42,
"links": [],
@@ -8757,7 +9606,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "single",
@@ -8772,7 +9622,7 @@
"uid": "${DS_PROMETHEUS}"
},
"exemplar": true,
- "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
+ "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -8846,7 +9696,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 21
+ "y": 47
},
"id": 220,
"links": [],
@@ -8854,7 +9704,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "single",
@@ -8869,7 +9720,7 @@
"uid": "${DS_PROMETHEUS}"
},
"exemplar": true,
- "expr": "rate(synapse_replication_tcp_protocol_inbound_rdata_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "rate(synapse_replication_tcp_protocol_inbound_rdata_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -8903,7 +9754,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 28
+ "y": 54
},
"hiddenSeries": false,
"id": 144,
@@ -8923,7 +9774,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.0.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -8981,6 +9832,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
@@ -8995,7 +9847,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 28
+ "y": 54
},
"hiddenSeries": false,
"id": 115,
@@ -9017,7 +9869,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -9030,7 +9882,7 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_replication_tcp_protocol_close_reason{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "expr": "rate(synapse_replication_tcp_protocol_close_reason_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} {{reason_type}}",
@@ -9053,11 +9905,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:260",
"format": "hertz",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:261",
"format": "short",
"logBase": 1,
"show": true
@@ -9087,7 +9941,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 35
+ "y": 61
},
"hiddenSeries": false,
"id": 113,
@@ -9109,7 +9963,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.0.4",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -9193,7 +10047,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 39
+ "y": 40
},
"id": 69,
"panels": [
@@ -9509,7 +10363,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 40
+ "y": 41
},
"id": 126,
"panels": [
@@ -10374,7 +11228,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 41
+ "y": 42
},
"id": 158,
"panels": [
@@ -10384,11 +11238,11 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -10399,7 +11253,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 41
+ "y": 43
},
"hiddenSeries": false,
"id": 156,
@@ -10420,12 +11274,13 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
+ "$$hashKey": "object:632",
"alias": "Max",
"color": "#bf1b00",
"fill": 0,
@@ -10440,23 +11295,26 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "synapse_admin_mau:current{instance=\"$instance\", job=~\"$job\"}",
+ "editorMode": "code",
+ "expr": "synapse_admin_mau_max{instance=\"$instance\", job=~\"(hhs_)?synapse\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
- "legendFormat": "Current",
- "refId": "A"
+ "legendFormat": "Max",
+ "range": true,
+ "refId": "B"
},
{
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
- "expr": "synapse_admin_mau:max{instance=\"$instance\", job=~\"$job\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": "Max",
- "refId": "B"
+ "editorMode": "code",
+ "expr": "synapse_admin_mau_current{instance=\"$instance\", job=~\"(hhs_)?synapse\"}",
+ "hide": false,
+ "legendFormat": "Current",
+ "range": true,
+ "refId": "C"
}
],
"thresholds": [],
@@ -10500,19 +11358,13 @@
"datasource": {
"uid": "$datasource"
},
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 41
+ "y": 43
},
"hiddenSeries": false,
"id": 160,
@@ -10532,7 +11384,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10604,7 +11456,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 42
+ "y": 43
},
"id": 177,
"panels": [
@@ -10614,11 +11466,11 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -10629,7 +11481,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 1
+ "y": 44
},
"hiddenSeries": false,
"id": 173,
@@ -10646,8 +11498,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -10660,12 +11515,14 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_notifier_users_woken_by_stream{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "editorMode": "code",
+ "expr": "rate(synapse_notifier_users_woken_by_stream_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
"legendFormat": "{{stream}} {{index}}",
"metric": "synapse_notifier",
+ "range": true,
"refId": "A",
"step": 2
}
@@ -10686,11 +11543,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:734",
"format": "hertz",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:735",
"format": "short",
"logBase": 1,
"show": true
@@ -10706,11 +11565,11 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -10721,7 +11580,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 1
+ "y": 44
},
"hiddenSeries": false,
"id": 175,
@@ -10738,8 +11597,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -10752,11 +11614,13 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_handler_presence_get_updates{job=~\"$job\",instance=\"$instance\"}[$bucket_size])",
+ "editorMode": "code",
+ "expr": "rate(synapse_handler_presence_get_updates_total{job=~\"$job\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
"legendFormat": "{{type}} {{index}}",
+ "range": true,
"refId": "A",
"step": 2
}
@@ -10777,12 +11641,14 @@
},
"yaxes": [
{
+ "$$hashKey": "object:819",
"format": "hertz",
"logBase": 1,
"min": "0",
"show": true
},
{
+ "$$hashKey": "object:820",
"format": "short",
"logBase": 1,
"show": true
@@ -10815,7 +11681,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 43
+ "y": 44
},
"id": 170,
"panels": [
@@ -10825,6 +11691,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fill": 1,
@@ -10833,7 +11700,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 43
+ "y": 45
},
"hiddenSeries": false,
"id": 168,
@@ -10853,7 +11720,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "8.3.2",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10866,9 +11733,11 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_appservice_api_sent_events{instance=\"$instance\"}[$bucket_size])",
+ "editorMode": "code",
+ "expr": "rate(synapse_appservice_api_sent_events_total{instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{service}}",
+ "range": true,
"refId": "A"
}
],
@@ -10910,6 +11779,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fill": 1,
@@ -10918,7 +11788,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 43
+ "y": 45
},
"hiddenSeries": false,
"id": 171,
@@ -10938,7 +11808,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "8.3.2",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10951,9 +11821,11 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_appservice_api_sent_transactions{instance=\"$instance\"}[$bucket_size])",
+ "editorMode": "code",
+ "expr": "rate(synapse_appservice_api_sent_transactions_total{instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{exported_service }} {{ service }}",
+ "range": true,
"refId": "A"
}
],
@@ -11012,7 +11884,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 44
+ "y": 45
},
"id": 188,
"panels": [
@@ -11024,19 +11896,13 @@
"datasource": {
"uid": "$datasource"
},
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 44
+ "y": 46
},
"hiddenSeries": false,
"id": 182,
@@ -11056,7 +11922,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -11147,21 +12013,16 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 44
+ "y": 46
},
"hiddenSeries": false,
"id": 184,
@@ -11181,7 +12042,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -11194,9 +12055,11 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_handler_presence_state_transition{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "editorMode": "code",
+ "expr": "rate(synapse_handler_presence_state_transition_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{from}} -> {{to}}",
+ "range": true,
"refId": "A"
}
],
@@ -11216,11 +12079,13 @@
},
"yaxes": [
{
+ "$$hashKey": "object:1090",
"format": "hertz",
"logBase": 1,
"show": true
},
{
+ "$$hashKey": "object:1091",
"format": "short",
"logBase": 1,
"show": true
@@ -11236,21 +12101,16 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 52
+ "y": 54
},
"hiddenSeries": false,
"id": 186,
@@ -11270,7 +12130,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -11283,9 +12143,11 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "rate(synapse_handler_presence_notify_reason{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "editorMode": "code",
+ "expr": "rate(synapse_handler_presence_notify_reason_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"interval": "",
"legendFormat": "{{reason}}",
+ "range": true,
"refId": "A"
}
],
@@ -11344,7 +12206,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 45
+ "y": 46
},
"id": 197,
"panels": [
@@ -11414,7 +12276,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "multi",
@@ -11654,7 +12517,8 @@
"legend": {
"calcs": [],
"displayMode": "list",
- "placement": "bottom"
+ "placement": "bottom",
+ "showLegend": true
},
"tooltip": {
"mode": "multi",
@@ -11693,7 +12557,7 @@
}
],
"refresh": false,
- "schemaVersion": 36,
+ "schemaVersion": 37,
"style": "dark",
"tags": [
"matrix"
@@ -11896,6 +12760,6 @@
"timezone": "",
"title": "Synapse",
"uid": "000000012",
- "version": 133,
+ "version": 149,
"weekStart": ""
-}
+}
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index b228ef35fcee..64a950aff01d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,101 @@
+matrix-synapse-py3 (1.74.0~rc1) stable; urgency=medium
+
+ * New dependency on libicu-dev to provide improved results for user
+ search.
+ * New Synapse release 1.74.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Dec 2022 13:30:01 +0000
+
+matrix-synapse-py3 (1.73.0) stable; urgency=medium
+
+ * New Synapse release 1.73.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 06 Dec 2022 11:48:56 +0000
+
+matrix-synapse-py3 (1.73.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.73.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 01 Dec 2022 10:02:19 +0000
+
+matrix-synapse-py3 (1.73.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.73.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 29 Nov 2022 12:28:13 +0000
+
+matrix-synapse-py3 (1.72.0) stable; urgency=medium
+
+ * New Synapse release 1.72.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 22 Nov 2022 10:57:30 +0000
+
+matrix-synapse-py3 (1.72.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.72.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 16 Nov 2022 15:10:59 +0000
+
+matrix-synapse-py3 (1.71.0) stable; urgency=medium
+
+ * New Synapse release 1.71.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 08 Nov 2022 10:38:10 +0000
+
+matrix-synapse-py3 (1.71.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.71.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 04 Nov 2022 12:00:33 +0000
+
+matrix-synapse-py3 (1.71.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.71.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Nov 2022 12:10:17 +0000
+
+matrix-synapse-py3 (1.70.1) stable; urgency=medium
+
+ * New Synapse release 1.70.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 28 Oct 2022 12:10:21 +0100
+
+matrix-synapse-py3 (1.70.0) stable; urgency=medium
+
+ * New Synapse release 1.70.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 26 Oct 2022 11:11:50 +0100
+
+matrix-synapse-py3 (1.70.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.70.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Oct 2022 10:59:47 +0100
+
+matrix-synapse-py3 (1.70.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.70.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 19 Oct 2022 14:11:57 +0100
+
+matrix-synapse-py3 (1.69.0) stable; urgency=medium
+
+ * New Synapse release 1.69.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 17 Oct 2022 11:31:03 +0100
+
+matrix-synapse-py3 (1.69.0~rc4) stable; urgency=medium
+
+ * New Synapse release 1.69.0rc4.
+
+ -- Synapse Packaging team <packages@matrix.org>  Fri, 14 Oct 2022 15:04:47 +0100
+
+matrix-synapse-py3 (1.69.0~rc3) stable; urgency=medium
+
+ * New Synapse release 1.69.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 12 Oct 2022 13:24:04 +0100
+
matrix-synapse-py3 (1.69.0~rc2) stable; urgency=medium
* New Synapse release 1.69.0rc2.
diff --git a/debian/control b/debian/control
index 86f5a66d021e..bc628cec085e 100644
--- a/debian/control
+++ b/debian/control
@@ -8,6 +8,8 @@ Build-Depends:
dh-virtualenv (>= 1.1),
libsystemd-dev,
libpq-dev,
+ libicu-dev,
+ pkg-config,
lsb-release,
python3-dev,
python3,
diff --git a/docker/Dockerfile b/docker/Dockerfile
index b20951d4cf62..7e5123210ad1 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -43,7 +43,7 @@ RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
- build-essential cargo git libffi-dev libssl-dev \
+ build-essential git libffi-dev libssl-dev \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
@@ -97,6 +97,8 @@ RUN \
zlib1g-dev \
git \
curl \
+ libicu-dev \
+ pkg-config \
&& rm -rf /var/lib/apt/lists/*
@@ -106,7 +108,13 @@ ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
-RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
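
A sketch of how the new build-arg is intended to be used when building the arm64 image; the buildx invocation, platform, and tag here are assumptions, not part of this change:

    # Opt in to the cargo CLI git fetcher to keep memory usage down on arm64.
    docker buildx build -f docker/Dockerfile \
      --platform linux/arm64 \
      --build-arg CARGO_NET_GIT_FETCH_WITH_CLI=true \
      -t synapse:arm64-dev .
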
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
@@ -121,7 +129,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
# ... and what we need to `pip install`.
-COPY pyproject.toml README.rst build_rust.py /synapse/
+COPY pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/
# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
@@ -129,7 +137,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Install the synapse package itself.
# If we have populated requirements.txt, we don't install any dependencies
# as we should already have those from the previous `pip install` step.
-RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
+RUN --mount=type=cache,target=/synapse/target,sharing=locked \
+ --mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
+ if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
else \
pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index ca3a259081c3..f3b5b00ce61a 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -84,6 +84,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
python3-venv \
sqlite3 \
libpq-dev \
+ libicu-dev \
+ pkg-config \
xmlsec1
# Install rust and ensure it's in the PATH
@@ -92,7 +94,7 @@ ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
-RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 0c2d4f3047af..faf7f2cef8a5 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -1,6 +1,7 @@
# syntax=docker/dockerfile:1
ARG SYNAPSE_VERSION=latest
+ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
# first of all, we create a base image with an nginx which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
@@ -23,7 +24,7 @@ FROM debian:bullseye-slim AS deps_base
FROM redis:6-bullseye AS redis_base
# now build the final image, based on the the regular Synapse docker image
-FROM matrixdotorg/synapse:$SYNAPSE_VERSION
+FROM $FROM
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index c0935c99a8b7..be1aa1c55e09 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -7,8 +7,9 @@
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
ARG SYNAPSE_VERSION=latest
+ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
-FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
+FROM $FROM
# First of all, we copy postgres server from the official postgres image,
# since for repeated rebuilds, this is much faster than apt installing
# postgres each time.
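
Both this Dockerfile and Dockerfile-workers above now take a `FROM` build-arg, so a locally built Synapse image can be threaded through the whole chain instead of pulling `matrixdotorg/*` images from Docker Hub. A sketch, with tags and build contexts as assumptions:

    # Build the base image, then layer the workers and Complement images on it.
    docker build -f docker/Dockerfile -t localhost/synapse:dev .
    docker build -f docker/Dockerfile-workers \
      --build-arg FROM=localhost/synapse:dev -t localhost/synapse-workers:dev .
    docker build -f docker/complement/Dockerfile \
      --build-arg FROM=localhost/synapse-workers:dev \
      -t complement-synapse:dev docker/complement
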
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
index cc6482f763ac..49d79745b064 100755
--- a/docker/complement/conf/start_for_complement.sh
+++ b/docker/complement/conf/start_for_complement.sh
@@ -45,7 +45,12 @@ esac
if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# Specify the workers to test with
- export SYNAPSE_WORKER_TYPES="\
+ # Allow overriding by explicitly setting SYNAPSE_WORKER_TYPES outside, while still
+ # utilizing WORKERS=1 for backwards compatibility.
+ # -n True if the length of string is non-zero.
+ # -z True if the length of string is zero.
+ if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
+ export SYNAPSE_WORKER_TYPES="\
event_persister, \
event_persister, \
background_worker, \
@@ -57,9 +62,12 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
federation_reader, \
federation_sender, \
synchrotron, \
+ client_reader, \
appservice, \
pusher"
+ fi
+ log "Workers requested: $SYNAPSE_WORKER_TYPES"
# Improve startup times by using a launcher based on fork()
export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else
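
With the guard above, callers can now pin the worker set themselves rather than taking the default list. A sketch of exercising the override, with the image tag an assumption:

    # A SYNAPSE_WORKER_TYPES set in the environment wins over the default list.
    docker run --rm \
      -e SYNAPSE_COMPLEMENT_USE_WORKERS=1 \
      -e SYNAPSE_WORKER_TYPES="event_persister, client_reader, federation_sender" \
      complement-synapse:dev
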
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 9e554a865ee5..ca640c343be7 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -12,6 +12,8 @@ trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
+url_preview_enabled: true
+url_preview_ip_range_blacklist: []
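
With previews switched on and the IP range blacklist emptied for the test network, the preview endpoint becomes reachable in Complement runs. A sketch of poking it by hand, with host, port, and token as assumptions:

    # Ask the media repo to preview a URL (requires an access token).
    curl -s -H "Authorization: Bearer $ACCESS_TOKEN" \
      'http://localhost:8008/_matrix/media/v3/preview_url?url=https%3A%2F%2Fmatrix.org'
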
## Registration ##
@@ -90,8 +92,6 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
- # Enable spaces support
- spaces_enabled: true
# Enable history backfilling support
msc2716_enabled: true
# server-side support for partial state in /send_join responses
@@ -100,8 +100,8 @@ experimental_features:
# client-side support for partial state in /send_join responses
faster_joins: true
{% endif %}
- # Enable jump to date endpoint
- msc3030_enabled: true
+ # Filtering /messages by relation type.
+ msc3874_enabled: true
server_notices:
system_mxid_localpart: _server
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 51583dc13de4..58c62f2231f3 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -20,7 +20,7 @@
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-# below. Leave empty for no workers, or set to '*' for all possible workers.
+# below. Leave empty for no workers.
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -39,6 +39,7 @@
# continue to work if so.
import os
+import platform
import subprocess
import sys
from pathlib import Path
@@ -49,13 +50,18 @@
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
-
+# Workers with exposed endpoints need either "client", "federation", or "media" listener_resources
+# Watching /_matrix/client needs a "client" listener
+# Watching /_matrix/federation needs a "federation" listener
+# Watching /_matrix/media and related needs a "media" listener
+# Stream writers require both "client" and "replication" listeners because they
+# must be reachable via the instance_map from the main process and also serve client endpoints.
WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"pusher": {
- "app": "synapse.app.pusher",
+ "app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
- "shared_extra_conf": {"start_pushers": False},
+ "shared_extra_conf": {},
"worker_extra_conf": "",
},
"user_dir": {
@@ -78,7 +84,11 @@
"^/_synapse/admin/v1/media/.*$",
"^/_synapse/admin/v1/quarantine_media/.*$",
],
- "shared_extra_conf": {"enable_media_repo": False},
+ # The first configured media worker will run the media background jobs
+ "shared_extra_conf": {
+ "enable_media_repo": False,
+ "media_instance_running_background_jobs": "media_repository1",
+ },
"worker_extra_conf": "enable_media_repo: true",
},
"appservice": {
@@ -89,10 +99,10 @@
"worker_extra_conf": "",
},
"federation_sender": {
- "app": "synapse.app.federation_sender",
+ "app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
- "shared_extra_conf": {"send_federation": False},
+ "shared_extra_conf": {},
"worker_extra_conf": "",
},
"synchrotron": {
@@ -107,6 +117,35 @@
"shared_extra_conf": {},
"worker_extra_conf": "",
},
+ "client_reader": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
+ "^/_matrix/client/v1/rooms/.*/hierarchy$",
+ "^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
+ "^/_matrix/client/v1/rooms/.*/threads$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
+ "^/_matrix/client/versions$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
+ "^/_matrix/client/(r0|v3|unstable)/register$",
+ "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
+ "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
+ "^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
"federation_reader": {
"app": "synapse.app.generic_worker",
"listener_resources": ["federation"],
@@ -125,6 +164,7 @@
"^/_matrix/federation/(v1|v2)/invite/",
"^/_matrix/federation/(v1|v2)/query_auth/",
"^/_matrix/federation/(v1|v2)/event_auth/",
+ "^/_matrix/federation/v1/timestamp_to_event/",
"^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
"^/_matrix/federation/(v1|v2)/user/devices/",
"^/_matrix/federation/(v1|v2)/get_groups_publicised$",
@@ -171,14 +211,54 @@
"worker_extra_conf": "",
},
"frontend_proxy": {
- "app": "synapse.app.frontend_proxy",
+ "app": "synapse.app.generic_worker",
"listener_resources": ["client", "replication"],
"endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
"shared_extra_conf": {},
- "worker_extra_conf": (
- "worker_main_http_uri: http://127.0.0.1:%d"
- % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
- ),
+ "worker_extra_conf": "",
+ },
+ "account_data": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(r0|v3|unstable)/.*/tags",
+ "^/_matrix/client/(r0|v3|unstable)/.*/account_data",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "presence": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/presence/"],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "receipts": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt",
+ "^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers",
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "to_device": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": ["^/_matrix/client/(r0|v3|unstable)/sendToDevice/"],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
+ },
+ "typing": {
+ "app": "synapse.app.generic_worker",
+ "listener_resources": ["client", "replication"],
+ "endpoint_patterns": [
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing"
+ ],
+ "shared_extra_conf": {},
+ "worker_extra_conf": "",
},
}
@@ -201,24 +281,19 @@
# Utility functions
def log(txt: str) -> None:
- """Log something to the stdout.
-
- Args:
- txt: The text to log.
- """
print(txt)
def error(txt: str) -> NoReturn:
- """Log something and exit with an error code.
-
- Args:
- txt: The text to log in error.
- """
- log(txt)
+ print(txt, file=sys.stderr)
sys.exit(2)
+def flush_buffers() -> None:
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+
def convert(src: str, dst: str, **template_vars: object) -> None:
"""Generate a file from a template
@@ -247,14 +322,14 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
outfile.write(rendered)
-def add_sharding_to_shared_config(
+def add_worker_roles_to_shared_config(
shared_config: dict,
worker_type: str,
worker_name: str,
worker_port: int,
) -> None:
"""Given a dictionary representing a config file shared across all workers,
- append sharded worker information to it for the current worker_type instance.
+ append appropriate worker information to it for the current worker_type instance.
Args:
shared_config: The config dict that all worker instances share (after being converted to YAML)
@@ -285,9 +360,19 @@ def add_sharding_to_shared_config(
"port": worker_port,
}
- elif worker_type == "media_repository":
- # The first configured media worker will run the media background jobs
- shared_config.setdefault("media_instance_running_background_jobs", worker_name)
+ elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
+ # Update the list of stream writers
+ # It's convenient that the name of the worker type is the same as the stream to write
+ shared_config.setdefault("stream_writers", {}).setdefault(
+ worker_type, []
+ ).append(worker_name)
+
+ # Map of stream writer instance names to host/ports combos
+ # For now, all stream writers need http replication ports
+ instance_map[worker_name] = {
+ "host": "localhost",
+ "port": worker_port,
+ }
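+
+    # For example, two "presence" workers (which this file's naming scheme calls
+    # "presence1" and "presence2") would yield shared config along these lines
+    # (an illustrative sketch, not literal output):
+    #   stream_writers:
+    #     presence: [presence1, presence2]
+    #   instance_map:
+    #     presence1: {host: localhost, port: <port1>}
+    #     presence2: {host: localhost, port: <port2>}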
def generate_base_homeserver_config() -> None:
@@ -299,7 +384,7 @@ def generate_base_homeserver_config() -> None:
# start.py already does this for us, so just call that.
    # note that this script is copied in by the official, monolith dockerfile
os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
- subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
+ subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
def generate_worker_files(
@@ -373,8 +458,8 @@ def generate_worker_files(
# No workers, just the main process
worker_types = []
else:
- # Split type names by comma
- worker_types = worker_types_env.split(",")
+ # Split type names by comma, ignoring whitespace.
+ worker_types = [x.strip() for x in worker_types_env.split(",")]
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
@@ -393,14 +478,11 @@ def generate_worker_files(
# For each worker type specified by the user, create config values
for worker_type in worker_types:
- worker_type = worker_type.strip()
-
worker_config = WORKERS_CONFIG.get(worker_type)
if worker_config:
worker_config = worker_config.copy()
else:
- log(worker_type + " is an unknown worker type! It will be ignored")
- continue
+ error(worker_type + " is an unknown worker type! Please fix!")
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
worker_type_counter[worker_type] = new_worker_count
@@ -419,11 +501,11 @@ def generate_worker_files(
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
- if worker_type_total_count > 1:
- # Update the shared config with sharding-related options if necessary
- add_sharding_to_shared_config(
- shared_config, worker_type, worker_name, worker_port
- )
+
+ # Update the shared config with sharding-related options if necessary
+ add_worker_roles_to_shared_config(
+ shared_config, worker_type, worker_name, worker_port
+ )
# Enable the worker in supervisord
worker_descriptors.append(worker_config)
@@ -604,14 +686,24 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
with open(mark_filepath, "w") as f:
f.write("")
+ # Lifted right out of start.py
+ jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
+
+ if os.path.isfile(jemallocpath):
+ environ["LD_PRELOAD"] = jemallocpath
+ else:
+ log("Could not find %s, will not use" % (jemallocpath,))
+
# Start supervisord, which will start Synapse, all of the configured worker
# processes, redis, nginx etc. according to the config we created above.
log("Starting supervisord")
- os.execl(
+ flush_buffers()
+ os.execle(
"/usr/local/bin/supervisord",
"supervisord",
"-c",
"/etc/supervisor/supervisord.conf",
+ environ,
)
diff --git a/docker/editable.Dockerfile b/docker/editable.Dockerfile
new file mode 100644
index 000000000000..0e8cf2e712ff
--- /dev/null
+++ b/docker/editable.Dockerfile
@@ -0,0 +1,75 @@
+# syntax=docker/dockerfile:1
+# This dockerfile builds an editable install of Synapse.
+#
+# Used by `complement.sh`. Not suitable for production use.
+
+ARG PYTHON_VERSION=3.9
+
+###
+### Stage 0: generate requirements.txt
+###
+# We hardcode the use of Debian bullseye here because this could change upstream
+# and other Dockerfiles used for testing are expecting bullseye.
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
+
+# Install Rust and other dependencies (stolen from normal Dockerfile)
+# install the OS build deps
+RUN \
+ --mount=type=cache,target=/var/cache/apt,sharing=locked \
+ --mount=type=cache,target=/var/lib/apt,sharing=locked \
+ apt-get update -qq && apt-get install -yqq \
+ build-essential \
+ libffi-dev \
+ libjpeg-dev \
+ libpq-dev \
+ libssl-dev \
+ libwebp-dev \
+ libxml++2.6-dev \
+ libxslt1-dev \
+ openssl \
+ zlib1g-dev \
+ git \
+ curl \
+ gosu \
+ libjpeg62-turbo \
+ libpq5 \
+ libwebp6 \
+ xmlsec1 \
+ libjemalloc2 \
+ && rm -rf /var/lib/apt/lists/*
+ENV RUSTUP_HOME=/rust
+ENV CARGO_HOME=/cargo
+ENV PATH=/cargo/bin:/rust/bin:$PATH
+RUN mkdir /rust /cargo
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+
+# Make a base copy of the editable source tree, so that we have something to
+# install and build now — even though it's going to be covered up by a mount
+# at runtime.
+COPY synapse /editable-src/synapse/
+COPY rust /editable-src/rust/
+# ... and what we need to `pip install`.
+COPY pyproject.toml poetry.lock README.rst build_rust.py Cargo.toml Cargo.lock /editable-src/
+
+RUN pip install poetry
+RUN poetry config virtualenvs.create false
+RUN cd /editable-src && poetry install --extras all
+
+# Make copies of useful things for inspection:
+# - the Rust module (must be copied to the editable source tree before startup)
+# - poetry.lock is useful for checking if dependencies have changed.
+RUN cp /editable-src/synapse/synapse_rust.abi3.so /synapse_rust.abi3.so.bak
+RUN cp /editable-src/poetry.lock /poetry.lock.bak
+
+
+### Extra setup from original Dockerfile
+COPY ./docker/start.py /start.py
+COPY ./docker/conf /conf
+
+EXPOSE 8008/tcp 8009/tcp 8448/tcp
+
+ENTRYPOINT ["/start.py"]
+
+HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
+ CMD curl -fSs http://localhost:8008/health || exit 1
diff --git a/docker/start.py b/docker/start.py
index 5a98dce55113..ebcc599f0402 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -13,14 +13,19 @@
# Utility functions
def log(txt: str) -> None:
- print(txt, file=sys.stderr)
+ print(txt)
def error(txt: str) -> NoReturn:
- log(txt)
+ print(txt, file=sys.stderr)
sys.exit(2)
+def flush_buffers() -> None:
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+
def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
"""Generate a file from a template
@@ -131,10 +136,10 @@ def generate_config_from_template(
if ownership is not None:
log(f"Setting ownership on /data to {ownership}")
- subprocess.check_output(["chown", "-R", ownership, "/data"])
+ subprocess.run(["chown", "-R", ownership, "/data"], check=True)
args = ["gosu", ownership] + args
- subprocess.check_output(args)
+ subprocess.run(args, check=True)
def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
@@ -158,7 +163,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
if ownership is not None:
# make sure that synapse has perms to write to the data dir.
log(f"Setting ownership on {data_dir} to {ownership}")
- subprocess.check_output(["chown", ownership, data_dir])
+ subprocess.run(["chown", ownership, data_dir], check=True)
# create a suitable log config from our template
log_config_file = "%s/%s.log.config" % (config_dir, server_name)
@@ -185,6 +190,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
"--open-private-ports",
]
# log("running %s" % (args, ))
+ flush_buffers()
os.execv(sys.executable, args)
@@ -267,8 +273,10 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
args = [sys.executable] + args
if ownership is not None:
args = ["gosu", ownership] + args
+ flush_buffers()
os.execve("/usr/sbin/gosu", args, environ)
else:
+ flush_buffers()
os.execve(sys.executable, args, environ)
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 16720bceb521..8d68719958d6 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -9,6 +9,8 @@
- [Configuring a Reverse Proxy](reverse_proxy.md)
- [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md)
- [Configuring a Turn Server](turn-howto.md)
+ - [coturn TURN server](setup/turn/coturn.md)
+ - [eturnal TURN server](setup/turn/eturnal.md)
- [Delegation](delegate.md)
# Upgrading
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 3625c7b6c5f5..880bef41947a 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -37,6 +37,7 @@ It returns a JSON body like the following:
"is_guest": 0,
"admin": 0,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"creation_ts": 1560432506,
"appservice_id": null,
@@ -167,6 +168,7 @@ A response body like the following is returned:
"admin": 0,
"user_type": null,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"displayname": "",
"avatar_url": null,
@@ -177,6 +179,7 @@ A response body like the following is returned:
"admin": 1,
"user_type": null,
"deactivated": 0,
+ "erased": false,
"shadow_banned": 0,
"displayname": "",
"avatar_url": "",
@@ -247,6 +250,7 @@ The following fields are returned in the JSON response body:
- `user_type` - string - Type of the user. Normal users are type `None`.
This allows user type specific behaviour. There are also types `support` and `bot`.
- `deactivated` - bool - Status if that user has been marked as deactivated.
+ - `erased` - bool - Status if that user has been marked as erased.
- `shadow_banned` - bool - Status if that user has been marked as shadow banned.
- `displayname` - string - The user's display name if they have set one.
- `avatar_url` - string - The user's avatar URL if they have set one.
@@ -1193,3 +1197,42 @@ Returns a `404` HTTP status code if no user was found, with a response body like
```
_Added in Synapse 1.68.0._
+
+
+### Find a user based on their Third Party ID (ThreePID or 3PID)
+
+The API is:
+
+```
+GET /_synapse/admin/v1/threepid/$medium/users/$address
+```
+
+When a user matches the given address for the given medium, an HTTP `200` response with a body like the following is returned:
+
+```json
+{
+ "user_id": "@hello:example.org"
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `medium` - Kind of third-party ID, either `email` or `msisdn`.
+- `address` - Value of the third-party ID.
+
+The `address` may contain characters that are not URL-safe, so it is advised to URL-encode it.
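+
+For example, to look up the account associated with the (hypothetical) address `alice@example.org`, percent-encode the `@` as `%40`:
+
+```sh
+# Illustrative sketch: substitute your own homeserver address and admin access token.
+curl --header "Authorization: Bearer <admin_access_token>" \
+    "http://localhost:8008/_synapse/admin/v1/threepid/email/users/alice%40example.org"
+```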
+
+**Errors**
+
+Returns a `404` HTTP status code if no user was found, with a response body like this:
+
+```json
+{
+ "errcode":"M_NOT_FOUND",
+ "error":"User not found"
+}
+```
+
+_Added in Synapse 1.72.0._
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 7f99220a3ba7..342bc1d34032 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -324,6 +324,12 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
+  - If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker
+ types you wish to test: a comma-delimited string of worker types, as defined in the
+ `WORKERS_CONFIG` template
+ [here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
+ A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`;
+ an example invocation is shown below. See the [worker documentation](../workers.md)
+ for additional information on workers.
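+
+For example, an illustrative invocation (assuming a Complement checkout at `../complement`):
+
+```sh
+WORKERS=1 WORKER_TYPES="federation_inbound, federation_sender, synchrotron" \
+  COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
+```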
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
@@ -333,7 +339,7 @@ SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/compleme
### Prettier formatting with `gotestfmt`
If you want to format the output of the tests the same way as it looks in CI,
-install [gotestfmt](https://github.com/haveyoudebuggedit/gotestfmt).
+install [gotestfmt](https://github.com/GoTestTools/gotestfmt).
You can then use this incantation to format the tests appropriately:
diff --git a/docs/metrics-howto.md b/docs/metrics-howto.md
index 8f1f11f2b2e5..16e4368f35d9 100644
--- a/docs/metrics-howto.md
+++ b/docs/metrics-howto.md
@@ -209,6 +209,9 @@ altogether in Synapse v1.73.0.**
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
+| synapse_admin_mau_current | synapse_admin_mau:current |
+| synapse_admin_mau_max | synapse_admin_mau:max |
+| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users |
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------
diff --git a/docs/openid.md b/docs/openid.md
index 45ba1947b303..e4ad45f306d6 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -49,6 +49,13 @@ setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.
+## OIDC Back-Channel Logout
+
+Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.
+
+This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user's session.
+This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and setting the following URL as the destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`
+
## Sample configs
Here are a few configs for providers that should work with Synapse.
@@ -123,6 +130,9 @@ oidc_providers:
[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.
+Keycloak supports OIDC Back-Channel Logout, which sends logout notifications to Synapse so that Synapse users are logged out when they log out of Keycloak.
+This can optionally be enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.
+
Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.
1. Click `Clients` in the sidebar and click `Create`
@@ -144,6 +154,8 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
+| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
+| Backchannel Logout Session Required (optional) | `On` |
5. Click `Save`
6. On the Credentials tab, update the fields:
@@ -167,7 +179,9 @@ oidc_providers:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
+ backchannel_logout_enabled: true # Optional
```
+
### Auth0
[Auth0][auth0] is a hosted SaaS IdP solution.
@@ -336,11 +350,12 @@ oidc_providers:
issuer: "https://accounts.google.com/"
client_id: "your-client-id" # TO BE FILLED
client_secret: "your-client-secret" # TO BE FILLED
- scopes: ["openid", "profile"]
+ scopes: ["openid", "profile", "email"] # email is optional, read below
user_mapping_provider:
config:
localpart_template: "{{ user.given_name|lower }}"
display_name_template: "{{ user.name }}"
+ email_template: "{{ user.email }}" # needs "email" in scopes above
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
public baseurl]/_synapse/client/oidc/callback`.
@@ -575,3 +590,44 @@ oidc_providers:
display_name_template: "{{ user.first_name }} {{ user.last_name }}"
email_template: "{{ user.email }}"
```
+
+### Mastodon
+
+[Mastodon](https://docs.joinmastodon.org/) instances provide an [OAuth API](https://docs.joinmastodon.org/spec/oauth/), allowing those instances to be used as a single sign-on provider for Synapse.
+
+The first step is to register Synapse as an application with your Mastodon instance, using the [Create an application API](https://docs.joinmastodon.org/methods/apps/#create) (see also [here](https://docs.joinmastodon.org/client/token/)). There are several ways to do this, but in the example below we are using `curl`.
+
+This example assumes that:
+* the Mastodon instance website URL is `https://your.mastodon.instance.url`, and
+* Synapse will be registered as an app named `my_synapse_app`.
+
+Send the following request, substituting the value of `synapse_public_baseurl` from your Synapse installation.
+```sh
+curl -d "client_name=my_synapse_app&redirect_uris=https://[synapse_public_baseurl]/_synapse/client/oidc/callback" -X POST https://your.mastodon.instance.url/api/v1/apps
+```
+
+You should receive a response similar to the following. Make sure to save it.
+```json
+{
+  "client_id": "someclientid_123",
+  "client_secret": "someclientsecret_123",
+  "id": "12345",
+  "name": "my_synapse_app",
+  "redirect_uri": "https://[synapse_public_baseurl]/_synapse/client/oidc/callback",
+  "website": null,
+  "vapid_key": "somerandomvapidkey_123"
+}
+```
+
+As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:
+
+```yaml
+oidc_providers:
+ - idp_id: my_mastodon
+ idp_name: "Mastodon Instance Example"
+ discover: false
+ issuer: "https://your.mastodon.instance.url/@admin"
+ client_id: "someclientid_123"
+ client_secret: "someclientsecret_123"
+ authorization_endpoint: "https://your.mastodon.instance.url/oauth/authorize"
+ token_endpoint: "https://your.mastodon.instance.url/oauth/token"
+ userinfo_endpoint: "https://your.mastodon.instance.url/api/v1/accounts/verify_credentials"
+ scopes: ["read"]
+ user_mapping_provider:
+ config:
+ subject_claim: "id"
+```
+
+Note that the fields `client_id` and `client_secret` are taken from the `curl` response above.
diff --git a/docs/postgres.md b/docs/postgres.md
index f2519f6b0a63..46b4603fe573 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -1,6 +1,7 @@
# Using Postgres
-Synapse supports PostgreSQL versions 10 or later.
+The minimum supported version of PostgreSQL is determined by the [Dependency
+Deprecation Policy](deprecation_policy.md).
## Install postgres client libraries
diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
index 4e7a1d443533..48dbc1c58eca 100644
--- a/docs/reverse_proxy.md
+++ b/docs/reverse_proxy.md
@@ -79,6 +79,9 @@ server {
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 50M;
+
+ # Synapse responses may be chunked, which is an HTTP/1.1 feature.
+ proxy_http_version 1.1;
}
}
```
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index 3065a0e2d986..6339160d0022 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -6,7 +6,7 @@
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index dcd8f17c5e98..436041f8a8ca 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -84,7 +84,9 @@ file when you upgrade the Debian package to a later version.
##### Downstream Debian packages
-Andrej Shadura maintains a `matrix-synapse` package in the Debian repositories.
+Andrej Shadura maintains a
+[`matrix-synapse`](https://packages.debian.org/sid/matrix-synapse) package in
+the Debian repositories.
For `bookworm` and `sid`, it can be installed simply with:
```sh
@@ -100,23 +102,27 @@ for information on how to use backports.
##### Downstream Ubuntu packages
We do not recommend using the packages in the default Ubuntu repository
-at this time, as they are old and suffer from known security vulnerabilities.
+at this time, as they are [old and suffer from known security vulnerabilities](
+ https://bugs.launchpad.net/ubuntu/+source/matrix-synapse/+bug/1848709
+).
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
#### Fedora
-Synapse is in the Fedora repositories as `matrix-synapse`:
+Synapse is in the Fedora repositories as
+[`matrix-synapse`](https://src.fedoraproject.org/rpms/matrix-synapse):
```sh
sudo dnf install matrix-synapse
```
-Oleg Girko provides Fedora RPMs at
+Additionally, Oleg Girko provides Fedora RPMs at
#### OpenSUSE
-Synapse is in the OpenSUSE repositories as `matrix-synapse`:
+Synapse is in the OpenSUSE repositories as
+[`matrix-synapse`](https://software.opensuse.org/package/matrix-synapse):
```sh
sudo zypper install matrix-synapse
@@ -151,7 +157,8 @@ sudo pip install py-bcrypt
#### Void Linux
-Synapse can be found in the void repositories as 'synapse':
+Synapse can be found in the void repositories as
+['synapse'](https://github.com/void-linux/void-packages/tree/master/srcpkgs/synapse):
```sh
xbps-install -Su
diff --git a/docs/setup/turn/coturn.md b/docs/setup/turn/coturn.md
new file mode 100644
index 000000000000..a1bb1e934c21
--- /dev/null
+++ b/docs/setup/turn/coturn.md
@@ -0,0 +1,188 @@
+# coturn TURN server
+
+The following sections describe how to install [coturn](https://github.com/coturn/coturn) (which implements the TURN REST API).
+
+## `coturn` setup
+
+### Initial installation
+
+The TURN daemon `coturn` is available from a variety of sources, such as native package managers, or it can be installed from source.
+
+#### Debian and Ubuntu based distributions
+
+Just install the Debian package:
+
+```sh
+sudo apt install coturn
+```
+
+This will install and start a systemd service called `coturn`.
+
+#### Source installation
+
+1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from GitHub. Unpack it and `cd` into the directory.
+
+1. Configure it:
+
+ ```sh
+ ./configure
+ ```
+
+ You may need to install `libevent2`: if so, you should do so in
+ the way recommended by your operating system. You can ignore
+ warnings about lack of database support: a database is unnecessary
+ for this purpose.
+
+1. Build and install it:
+
+ ```sh
+ make
+ sudo make install
+ ```
+
+### Configuration
+
+1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
+ lines, with example values, are:
+
+ ```
+ use-auth-secret
+ static-auth-secret=[your secret key here]
+ realm=turn.myserver.org
+ ```
+
+ See `turnserver.conf` for explanations of the options. One way to generate
+ the `static-auth-secret` is with `pwgen`:
+
+ ```sh
+ pwgen -s 64 1
+ ```
+
+ A `realm` must be specified, but its value is somewhat arbitrary. (It is
+ sent to clients as part of the authentication flow.) It is conventional to
+ set it to be your server name.
+
+1. You will most likely want to configure `coturn` to write logs somewhere. The
+ easiest way is normally to send them to the syslog:
+
+ ```sh
+ syslog
+ ```
+
+ (in which case, the logs will be available via `journalctl -u coturn` on a
+ systemd system). Alternatively, `coturn` can be configured to write to a
+ logfile - check the example config file supplied with `coturn`.
+
+1. Consider your security settings. TURN lets users request a relay which will
+ connect to arbitrary IP addresses and ports. The following configuration is
+ suggested as a minimum starting point:
+
+ ```
+ # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
+ no-tcp-relay
+
+ # don't let the relay ever try to connect to private IP address ranges within your network (if any)
+ # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
+ denied-peer-ip=10.0.0.0-10.255.255.255
+ denied-peer-ip=192.168.0.0-192.168.255.255
+ denied-peer-ip=172.16.0.0-172.31.255.255
+
+ # recommended additional local peers to block, to mitigate external access to internal services.
+ # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
+ no-multicast-peers
+ denied-peer-ip=0.0.0.0-0.255.255.255
+ denied-peer-ip=100.64.0.0-100.127.255.255
+ denied-peer-ip=127.0.0.0-127.255.255.255
+ denied-peer-ip=169.254.0.0-169.254.255.255
+ denied-peer-ip=192.0.0.0-192.0.0.255
+ denied-peer-ip=192.0.2.0-192.0.2.255
+ denied-peer-ip=192.88.99.0-192.88.99.255
+ denied-peer-ip=198.18.0.0-198.19.255.255
+ denied-peer-ip=198.51.100.0-198.51.100.255
+ denied-peer-ip=203.0.113.0-203.0.113.255
+ denied-peer-ip=240.0.0.0-255.255.255.255
+
+ # special case the turn server itself so that client->TURN->TURN->client flows work
+ # this should be one of the turn server's listening IPs
+ allowed-peer-ip=10.0.0.1
+
+ # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
+ user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
+ total-quota=1200
+ ```
+
+1. Also consider supporting TLS/DTLS. To do this, add the following settings
+ to `turnserver.conf`:
+
+ ```
+ # TLS certificates, including intermediate certs.
+ # For Let's Encrypt certificates, use `fullchain.pem` here.
+ cert=/path/to/fullchain.pem
+
+ # TLS private key file
+ pkey=/path/to/privkey.pem
+
+ # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
+ #no-tls
+ #no-dtls
+ ```
+
+ In this case, replace the `turn:` schemes in the `turn_uris` settings below
+ with `turns:`.
+
+ We recommend that you only try to set up TLS/DTLS once you have set up a
+ basic installation and got it working.
+
+ NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
+ not work with any Matrix client that uses Chromium's WebRTC library. This
+ currently includes Element Android & iOS; for more details, see their
+ [respective](https://github.com/vector-im/element-android/issues/1533)
+ [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
+ [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
+ Consider using a ZeroSSL certificate for your TURN server as a working alternative.
+
+1. Ensure your firewall allows traffic into the TURN server on the ports
+ you've configured it to listen on (by default 3478 and 5349 for TURN
+ traffic, allowing both TCP and UDP, and ports 49152-65535 for the UDP
+ relay).
+
+1. If your TURN server is behind NAT, the NAT gateway must have an external,
+ publicly-reachable IP address. You must configure `coturn` to advertise that
+ address to connecting clients:
+
+ ```
+ external-ip=EXTERNAL_NAT_IPv4_ADDRESS
+ ```
+
+ You may optionally limit the TURN server to listen only on the local
+ address that is mapped by NAT to the external address:
+
+ ```
+ listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
+ ```
+
+ If your NAT gateway is reachable over both IPv4 and IPv6, you may
+ configure `coturn` to advertise each available address:
+
+ ```
+ external-ip=EXTERNAL_NAT_IPv4_ADDRESS
+ external-ip=EXTERNAL_NAT_IPv6_ADDRESS
+ ```
+
+ When advertising an external IPv6 address, ensure that the firewall and
+ network settings of the system running your TURN server are configured to
+ accept IPv6 traffic, and that the TURN server is listening on the local
+ IPv6 address that is mapped by NAT to the external IPv6 address.
+
+1. (Re)start the turn server:
+
+ * If you used the Debian package (or have set up a systemd unit yourself):
+ ```sh
+ sudo systemctl restart coturn
+ ```
+
+ * If you built from source:
+
+ ```sh
+ /usr/local/bin/turnserver -o
+ ```
diff --git a/docs/setup/turn/eturnal.md b/docs/setup/turn/eturnal.md
new file mode 100644
index 000000000000..2e5a45673ec5
--- /dev/null
+++ b/docs/setup/turn/eturnal.md
@@ -0,0 +1,170 @@
+# eturnal TURN server
+
+The following sections describe how to install [eturnal](https://github.com/processone/eturnal)
+(which implements the TURN REST API).
+
+## `eturnal` setup
+
+### Initial installation
+
+The `eturnal` TURN server implementation is available from a variety of sources,
+such as native package managers, binary packages, installation from source, or a
+[container image](https://eturnal.net/documentation/code/docker.html). All of these are
+described [here](https://github.com/processone/eturnal#installation).
+
+Quick-Test instructions in a [Linux Shell](https://github.com/processone/eturnal/blob/master/QUICK-TEST.md)
+or with [Docker](https://github.com/processone/eturnal/blob/master/docker-k8s/QUICK-TEST.md)
+are available as well.
+
+### Configuration
+
+After installation, `eturnal` usually ships a [default configuration file](https://github.com/processone/eturnal/blob/master/config/eturnal.yml)
+at `/etc/eturnal.yml` (with a fallback location at
+`/opt/eturnal/etc/eturnal.yml`). It uses the (indentation-sensitive!) [YAML](https://en.wikipedia.org/wiki/YAML)
+format. The file contains further explanations.
+
+Here are some hints on how to configure `eturnal` on your [host machine](https://github.com/processone/eturnal#configuration)
+or when using e.g. [Docker](https://eturnal.net/documentation/code/docker.html).
+You may also dive deeper into the [reference documentation](https://eturnal.net/documentation/).
+
+`eturnal` runs out of the box with the default configuration. To enable TURN and
+to integrate it with your homeserver, some aspects in `eturnal`'s default configuration file
+must be edited:
+
+1. The homeserver's [`turn_shared_secret`](../../usage/configuration/config_documentation.md#turn_shared_secret)
+ and `eturnal`'s shared `secret` for authentication
+
+ Both need to have the same value. Uncomment and adjust this line in `eturnal`'s
+ configuration file:
+
+ ```yaml
+ secret: "long-and-cryptic" # Shared secret, CHANGE THIS.
+ ```
+
+ One way to generate a `secret` is with `pwgen`:
+
+ ```sh
+ pwgen -s 64 1
+ ```
+
+1. Public IP address
+
+ If your TURN server is behind NAT, the NAT gateway must have an external,
+ publicly-reachable IP address. `eturnal` tries to autodetect the public IP address;
+ however, it may also be configured explicitly by uncommenting and adjusting this line, so
+ that `eturnal` advertises that address to connecting clients:
+
+ ```yaml
+ relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
+ ```
+
+ If your NAT gateway is reachable over both IPv4 and IPv6, you may
+ configure `eturnal` to advertise each available address:
+
+ ```yaml
+ relay_ipv4_addr: "203.0.113.4" # The server's public IPv4 address.
+ relay_ipv6_addr: "2001:db8::4" # The server's public IPv6 address (optional).
+ ```
+
+ When advertising an external IPv6 address, ensure that the firewall and
+ network settings of the system running your TURN server are configured to
+ accept IPv6 traffic, and that the TURN server is listening on the local
+ IPv6 address that is mapped by NAT to the external IPv6 address.
+
+1. Logging
+
+ If `eturnal` was started by systemd, log files are written into the
+ `/var/log/eturnal` directory by default. In order to log to the [journal](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html)
+ instead, the `log_dir` option can be set to `stdout` in the configuration file.
+
+1. Security considerations
+
+ Consider your security settings. TURN lets users request a relay which will
+ connect to arbitrary IP addresses and ports. The following configuration is
+ suggested as a minimum starting point; see also the [official documentation](https://eturnal.net/documentation/#blacklist):
+
+ ```yaml
+ ## Reject TURN relaying from/to the following addresses/networks:
+ blacklist: # This is the default blacklist.
+ - "127.0.0.0/8" # IPv4 loopback.
+ - "::1" # IPv6 loopback.
+ - recommended # Expands to a number of networks recommended to be
+ # blocked, but includes private networks. Those
+ # would have to be 'whitelist'ed if eturnal serves
+ # local clients/peers within such networks.
+ ```
+
+ To whitelist IP addresses or specific (private) networks, you need to **add** a
+ `whitelist` section to the configuration file, e.g.:
+
+ ```yaml
+ whitelist:
+ - "192.168.0.0/16"
+ - "203.0.113.113"
+ - "2001:db8::/64"
+ ```
+
+ The more specific, the better.
+
+1. TURNS (TURN via TLS/DTLS)
+
+ Also consider supporting TLS/DTLS. To do this, adjust the following settings
+ in the `eturnal.yml` configuration file (the TLS parts must no longer be commented out):
+
+ ```yaml
+ listen:
+ - ip: "::"
+ port: 3478
+ transport: udp
+ - ip: "::"
+ port: 3478
+ transport: tcp
+ - ip: "::"
+ port: 5349
+ transport: tls
+
+ ## TLS certificate/key files (must be readable by 'eturnal' user!):
+ tls_crt_file: /etc/eturnal/tls/crt.pem
+ tls_key_file: /etc/eturnal/tls/key.pem
+ ```
+
+ In this case, replace the `turn:` schemes in the homeserver's `turn_uris` setting
+ with `turns:`. More detail is given [here](../../usage/configuration/config_documentation.md#turn_uris).
+
+ We recommend that you only try to set up TLS/DTLS once you have set up a
+ basic installation and got it working.
+
+ NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
+ not work with any Matrix client that uses Chromium's WebRTC library. This
+ currently includes Element Android & iOS; for more details, see their
+ [respective](https://github.com/vector-im/element-android/issues/1533)
+ [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
+ [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
+ Consider using a ZeroSSL certificate for your TURN server as a working alternative.
+
+1. Firewall
+
+ Ensure your firewall allows traffic into the TURN server on the ports
+ you've configured it to listen on (by default 3478 and 5349 for TURN
+ traffic, allowing both TCP and UDP, and ports 49152-65535 for the UDP
+ relay).
+
+1. Reloading/restarting `eturnal`
+
+ Changes to the configuration file require `eturnal` to reload or restart; this
+ can be achieved by:
+
+ ```sh
+ eturnalctl reload
+ ```
+
+ `eturnal` performs a configuration check before actually reloading/restarting
+ and provides hints if something is not correctly configured.
+
+### eturnalctl operations script
+
+`eturnal` offers a handy [operations script](https://eturnal.net/documentation/#Operation)
+which can be called e.g. to check whether the service is up, to restart the service,
+to query how many active sessions exist, to change logging behaviour, and so on.
+
+Hint: If `eturnalctl` is not part of your `$PATH`, consider either sym-linking it (e.g. `ln -s /opt/eturnal/bin/eturnalctl /usr/local/bin/eturnalctl`) or calling it directly from the default `eturnal` directory: e.g. `/opt/eturnal/bin/eturnalctl info`
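+
+A minimal sketch of common calls, using only subcommands mentioned in this documentation:
+
+```sh
+eturnalctl info           # check whether the service is up
+eturnalctl credentials    # generate ephemeral TURN credentials
+eturnalctl loglevel debug # change logging behaviour at runtime
+eturnalctl reload         # re-read and apply the configuration file
+```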
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
index 37a311ad9cc7..4e9e4117cd10 100644
--- a/docs/turn-howto.md
+++ b/docs/turn-howto.md
@@ -9,222 +9,28 @@ allows the homeserver to generate credentials that are valid for use on the
TURN server through the use of a secret shared between the homeserver and the
TURN server.
-The following sections describe how to install [coturn]() (which implements the TURN REST API) and integrate it with synapse.
+This documentation provides two TURN server configuration examples:
+
+* [coturn](setup/turn/coturn.md)
+* [eturnal](setup/turn/eturnal.md)
## Requirements
-For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP.
+For TURN relaying to work, the TURN service must be hosted on a server/endpoint with a public IP.
Hosting TURN behind NAT requires port forwarding and for the NAT gateway to have a public IP.
However, even with appropriate configuration, NAT is known to cause issues and to often not work.
-## `coturn` setup
-
-### Initial installation
-
-The TURN daemon `coturn` is available from a variety of sources such as native package managers, or installation from source.
-
-#### Debian installation
-
-Just install the debian package:
-
-```sh
-apt install coturn
-```
-
-This will install and start a systemd service called `coturn`.
-
-#### Source installation
-
-1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from github. Unpack it and `cd` into the directory.
-
-1. Configure it:
-
- ```sh
- ./configure
- ```
-
- You may need to install `libevent2`: if so, you should do so in
- the way recommended by your operating system. You can ignore
- warnings about lack of database support: a database is unnecessary
- for this purpose.
-
-1. Build and install it:
-
- ```sh
- make
- make install
- ```
-
-### Configuration
-
-1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
- lines, with example values, are:
-
- ```
- use-auth-secret
- static-auth-secret=[your secret key here]
- realm=turn.myserver.org
- ```
-
- See `turnserver.conf` for explanations of the options. One way to generate
- the `static-auth-secret` is with `pwgen`:
-
- ```sh
- pwgen -s 64 1
- ```
-
- A `realm` must be specified, but its value is somewhat arbitrary. (It is
- sent to clients as part of the authentication flow.) It is conventional to
- set it to be your server name.
-
-1. You will most likely want to configure coturn to write logs somewhere. The
- easiest way is normally to send them to the syslog:
-
- ```sh
- syslog
- ```
-
- (in which case, the logs will be available via `journalctl -u coturn` on a
- systemd system). Alternatively, coturn can be configured to write to a
- logfile - check the example config file supplied with coturn.
-
-1. Consider your security settings. TURN lets users request a relay which will
- connect to arbitrary IP addresses and ports. The following configuration is
- suggested as a minimum starting point:
-
- ```
- # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
- no-tcp-relay
-
- # don't let the relay ever try to connect to private IP address ranges within your network (if any)
- # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
- denied-peer-ip=10.0.0.0-10.255.255.255
- denied-peer-ip=192.168.0.0-192.168.255.255
- denied-peer-ip=172.16.0.0-172.31.255.255
-
- # recommended additional local peers to block, to mitigate external access to internal services.
- # https://www.rtcsec.com/article/slack-webrtc-turn-compromise-and-bug-bounty/#how-to-fix-an-open-turn-relay-to-address-this-vulnerability
- no-multicast-peers
- denied-peer-ip=0.0.0.0-0.255.255.255
- denied-peer-ip=100.64.0.0-100.127.255.255
- denied-peer-ip=127.0.0.0-127.255.255.255
- denied-peer-ip=169.254.0.0-169.254.255.255
- denied-peer-ip=192.0.0.0-192.0.0.255
- denied-peer-ip=192.0.2.0-192.0.2.255
- denied-peer-ip=192.88.99.0-192.88.99.255
- denied-peer-ip=198.18.0.0-198.19.255.255
- denied-peer-ip=198.51.100.0-198.51.100.255
- denied-peer-ip=203.0.113.0-203.0.113.255
- denied-peer-ip=240.0.0.0-255.255.255.255
-
- # special case the turn server itself so that client->TURN->TURN->client flows work
- # this should be one of the turn server's listening IPs
- allowed-peer-ip=10.0.0.1
-
- # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
- user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
- total-quota=1200
- ```
-
-1. Also consider supporting TLS/DTLS. To do this, add the following settings
- to `turnserver.conf`:
-
- ```
- # TLS certificates, including intermediate certs.
- # For Let's Encrypt certificates, use `fullchain.pem` here.
- cert=/path/to/fullchain.pem
-
- # TLS private key file
- pkey=/path/to/privkey.pem
-
- # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed
- #no-tls
- #no-dtls
- ```
-
- In this case, replace the `turn:` schemes in the `turn_uris` settings below
- with `turns:`.
-
- We recommend that you only try to set up TLS/DTLS once you have set up a
- basic installation and got it working.
-
- NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will
- not work with any Matrix client that uses Chromium's WebRTC library. This
- currently includes Element Android & iOS; for more details, see their
- [respective](https://github.com/vector-im/element-android/issues/1533)
- [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying
- [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710).
- Consider using a ZeroSSL certificate for your TURN server as a working alternative.
-
-1. Ensure your firewall allows traffic into the TURN server on the ports
- you've configured it to listen on (By default: 3478 and 5349 for TURN
- traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535
- for the UDP relay.)
-
-1. If your TURN server is behind NAT, the NAT gateway must have an external,
- publicly-reachable IP address. You must configure coturn to advertise that
- address to connecting clients:
-
- ```
- external-ip=EXTERNAL_NAT_IPv4_ADDRESS
- ```
-
- You may optionally limit the TURN server to listen only on the local
- address that is mapped by NAT to the external address:
-
- ```
- listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS
- ```
-
- If your NAT gateway is reachable over both IPv4 and IPv6, you may
- configure coturn to advertise each available address:
-
- ```
- external-ip=EXTERNAL_NAT_IPv4_ADDRESS
- external-ip=EXTERNAL_NAT_IPv6_ADDRESS
- ```
-
- When advertising an external IPv6 address, ensure that the firewall and
- network settings of the system running your TURN server are configured to
- accept IPv6 traffic, and that the TURN server is listening on the local
- IPv6 address that is mapped by NAT to the external IPv6 address.
-
-1. (Re)start the turn server:
-
- * If you used the Debian package (or have set up a systemd unit yourself):
- ```sh
- systemctl restart coturn
- ```
-
- * If you installed from source:
-
- ```sh
- bin/turnserver -o
- ```
+Afterwards, the homeserver needs some further configuration.
## Synapse setup
Your homeserver configuration file needs the following extra keys:
-1. "`turn_uris`": This needs to be a yaml list of public-facing URIs
- for your TURN server to be given out to your clients. Add separate
- entries for each transport your TURN server supports.
-2. "`turn_shared_secret`": This is the secret shared between your
- homeserver and your TURN server, so you should set it to the same
- string you used in turnserver.conf.
-3. "`turn_user_lifetime`": This is the amount of time credentials
- generated by your homeserver are valid for (in milliseconds).
- Shorter times offer less potential for abuse at the expense of
- increased traffic between web clients and your homeserver to
- refresh credentials. The TURN REST API specification recommends
- one day (86400000).
-4. "`turn_allow_guests`": Whether to allow guest users to use the
- TURN server. This is enabled by default, as otherwise VoIP will
- not work reliably for guests. However, it does introduce a
- security risk as it lets guests connect to arbitrary endpoints
- without having gone through a CAPTCHA or similar to register a
- real account.
+1. [`turn_uris`](usage/configuration/config_documentation.md#turn_uris)
+2. [`turn_shared_secret`](usage/configuration/config_documentation.md#turn_shared_secret)
+3. [`turn_user_lifetime`](usage/configuration/config_documentation.md#turn_user_lifetime)
+4. [`turn_allow_guests`](usage/configuration/config_documentation.md#turn_allow_guests)
As an example, here is the relevant section of the config file for `matrix.org`. The
`turn_uris` are appropriate for TURN servers listening on the default ports, with no TLS.
@@ -232,7 +38,7 @@ As an example, here is the relevant section of the config file for `matrix.org`.
turn_uris: [ "turn:turn.matrix.org?transport=udp", "turn:turn.matrix.org?transport=tcp" ]
turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
turn_user_lifetime: 86400000
- turn_allow_guests: True
+ turn_allow_guests: true
After updating the homeserver configuration, you must restart synapse:
@@ -263,7 +69,7 @@ Here are a few things to try:
* Check that you have opened your firewall to allow UDP traffic to the UDP
relay ports (49152-65535 by default).
- * Try disabling `coturn`'s TLS/DTLS listeners and enable only its (unencrypted)
+ * Try disabling TLS/DTLS listeners and enabling only the (unencrypted)
TCP/UDP listeners. (This will only leave signaling traffic unencrypted;
voice & video WebRTC traffic is always encrypted.)
@@ -288,12 +94,19 @@ Here are a few things to try:
* ensure that your TURN server uses the NAT gateway as its default route.
- * Enable more verbose logging in coturn via the `verbose` setting:
+ * Enable more verbose logging: in `coturn`, via the `verbose` setting:
```
verbose
```
+ or in `eturnal`, either with the shell command `eturnalctl loglevel debug` or in the configuration file (the service needs to [reload](https://eturnal.net/documentation/#Operation) for the change to take effect):
+
+ ```yaml
+ ## Logging configuration:
+ log_level: debug
+ ```
+
... and then see if there are any clues in its logs.
* If you are using a browser-based client under Chrome, check
@@ -317,7 +130,7 @@ Here are a few things to try:
matrix client to your homeserver in your browser's network inspector. In
the response you should see `username` and `password`. Or:
- * Use the following shell commands:
+ * Use the following shell commands for `coturn`:
```sh
secret=staticAuthSecretHere
@@ -327,11 +140,16 @@ Here are a few things to try:
echo -e "username: $u\npassword: $p"
```
- Or:
+ or for `eturnal`:
+
+ ```sh
+ eturnalctl credentials
+ ```
+
- * Temporarily configure coturn to accept a static username/password. To do
- this, comment out `use-auth-secret` and `static-auth-secret` and add the
- following:
+ * Or (**coturn only**): Temporarily configure `coturn` to accept a static
+ username/password. To do this, comment out `use-auth-secret` and
+ `static-auth-secret` and add the following:
```
lt-cred-mech
diff --git a/docs/upgrade.md b/docs/upgrade.md
index b81385b19183..4fe9e4f02e9c 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -88,6 +88,82 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
+# Upgrading to v1.73.0
+
+## Legacy Prometheus metric names have now been removed
+
+Synapse v1.69.0 included the deprecation of legacy Prometheus metric names
+and offered an option to disable them.
+Synapse v1.71.0 disabled legacy Prometheus metric names by default.
+
+This version, v1.73.0, removes those legacy Prometheus metric names entirely.
+This also means that the `enable_legacy_metrics` configuration option has been
+removed; it will no longer be possible to re-enable the legacy metric names.
+
+If you use metrics and have not yet updated your Grafana dashboard(s),
+Prometheus console(s) or alerting rule(s), please consider doing so when upgrading
+to this version.
+Note that the included Grafana dashboard was updated in v1.72.0 to correct some
+metric names which were missed when legacy metrics were disabled by default.
+
+See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names)
+for more context.
+
+
+# Upgrading to v1.72.0
+
+## Dropping support for PostgreSQL 10
+
+In line with our [deprecation policy](deprecation_policy.md), we've dropped
+support for PostgreSQL 10, as it is no longer supported upstream.
+
+This release of Synapse requires PostgreSQL 11+.
+
+
+# Upgrading to v1.71.0
+
+## Removal of the `generate_short_term_login_token` module API method
+
+As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_short_term_login_token-module-api-method), the deprecated `generate_short_term_login_token` module method has been removed.
+
+Modules relying on it can instead use the `create_login_token` method.
+
+
+## Changes to the events received by application services (interest)
+
+To align with spec (changed in
+[MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905)), Synapse now
+only considers local users to be interesting. In other words, the `users` namespace
+regex is only applied against local users of the homeserver.
+
+Please note that this probably doesn't affect the expected behavior of your application
+service, since an interesting local user in a room means that all messages in the room
+(from local or remote users) will still be considered interesting. And matching a room
+with the `rooms` or `aliases` namespace regex will still mark all events sent in the
+room as interesting to the application service.
+
+If one of your application service's `users` regexes was intended to match a remote user,
+it will no longer match as you expect. The behavioral mismatch between matching all
+local users and only some remote users is why the spec was changed/clarified, and this
+caveat is no longer supported.
+
+
+## Legacy Prometheus metric names are now disabled by default
+
+Synapse v1.71.0 disables legacy Prometheus metric names by default.
+For administrators who still rely on them and have not yet had a chance to update their
+uses of the metrics, it's still possible to specify `enable_legacy_metrics: true` in
+the configuration to re-enable them temporarily.
+
+Synapse v1.73.0 will **remove legacy metric names altogether** and at that point,
+it will no longer be possible to re-enable them.
+
+If you do not use metrics or you have already updated your Grafana dashboard(s),
+Prometheus console(s) and alerting rule(s), there is no action needed.
+
+See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names).
+
+
# Upgrading to v1.69.0
## Changes to the receipts replication streams
diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md
index f11e0b19a63a..c00de2dd447d 100644
--- a/docs/usage/administration/admin_api/README.md
+++ b/docs/usage/administration/admin_api/README.md
@@ -19,7 +19,7 @@ already on your `$PATH` depending on how Synapse was installed.
Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
## Making an Admin API request
-For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints)
+For security reasons, we [recommend](../../../reverse_proxy.md#synapse-administration-endpoints)
that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a
reverse proxy. This means you should typically query the Admin API from a terminal on
the machine which runs Synapse.
diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md
index 7ba5a83f0446..0bfb732464fd 100644
--- a/docs/usage/administration/admin_faq.md
+++ b/docs/usage/administration/admin_faq.md
@@ -79,7 +79,7 @@ Here we can see that the request has been tagged with `GET-37`. (The tag depends
grep 'GET-37' homeserver.log
```
-If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code).
+If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see [quoting code](https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code)).
What do all those fields in the 'Processed' line mean?
diff --git a/docs/usage/administration/monthly_active_users.md b/docs/usage/administration/monthly_active_users.md
index d4e90372846a..b1da6f17c2ef 100644
--- a/docs/usage/administration/monthly_active_users.md
+++ b/docs/usage/administration/monthly_active_users.md
@@ -73,12 +73,12 @@ When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMI
Synapse records several different prometheus metrics for MAU.
-`synapse_admin_mau:current` records the current MAU figure for native (non-application-service) users.
+`synapse_admin_mau_current` records the current MAU figure for native (non-application-service) users.
-`synapse_admin_mau:max` records the maximum MAU as dictated by the `max_mau_value` config value.
+`synapse_admin_mau_max` records the maximum MAU as dictated by the `max_mau_value` config value.
`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used
to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native` .
-`synapse_admin_mau:registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
+`synapse_admin_mau_registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
registered accounts on the homeserver.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index df3a62b27e3f..4d32902fea21 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -99,7 +99,7 @@ modules:
config: {}
```
---
-## Server ##
+## Server
Define your homeserver name and other base options.
@@ -159,7 +159,7 @@ including _matrix/...). This is the same URL a user might enter into the
'Custom Homeserver URL' field on their client. If you use Synapse with a
reverse proxy, this should be the URL to reach Synapse via the proxy.
Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
-'listeners' below).
+['listeners'](#listeners) below).
Defaults to `https:///`.
@@ -570,7 +570,7 @@ Example configuration:
delete_stale_devices_after: 1y
```
-## Homeserver blocking ##
+## Homeserver blocking
Useful options for Synapse admins.
---
@@ -858,7 +858,7 @@ which are older than the room's maximum retention period. Synapse will also
filter events received over federation so that events that should have been
purged are ignored and not stored again.
-The message retention policies feature is disabled by default. Please be advised
+The message retention policies feature is disabled by default. Please be advised
that enabling this feature carries some risk. There are known bugs with the implementation
which can cause database corruption. Setting retention to delete older history
is less risky than deleting newer history but in general caution is advised when enabling this
@@ -922,7 +922,7 @@ retention:
interval: 1d
```
---
-## TLS ##
+## TLS
Options related to TLS.
@@ -1012,7 +1012,7 @@ federation_custom_ca_list:
- myCA3.pem
```
---
-## Federation ##
+## Federation
Options related to federation.
@@ -1071,7 +1071,7 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
-## Caching ##
+## Caching
Options related to caching.
@@ -1185,7 +1185,7 @@ file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using
`systemctl reload matrix-synapse`.
---
-## Database ##
+## Database
Config options related to database settings.
---
@@ -1332,20 +1332,21 @@ databases:
cp_max: 10
```
---
-## Logging ##
+## Logging
Config options related to logging.
---
### `log_config`
-This option specifies a yaml python logging config file as described [here](https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema).
+This option specifies a yaml python logging config file as described
+[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
Example configuration:
```yaml
log_config: "CONFDIR/SERVERNAME.log.config"
```
---
-## Ratelimiting ##
+## Ratelimiting
Options related to ratelimiting in Synapse.
Each ratelimiting configuration is made of two parameters:
@@ -1576,7 +1577,7 @@ Example configuration:
federation_rr_transactions_per_room_per_second: 40
```
---
-## Media Store ##
+## Media Store
Config options related to Synapse's media store.
---
@@ -1766,7 +1767,7 @@ url_preview_ip_range_blacklist:
- 'ff00::/8'
- 'fec0::/10'
```
-----
+---
### `url_preview_ip_range_whitelist`
This option sets a list of IP address CIDR ranges that the URL preview spider is allowed
@@ -1860,7 +1861,7 @@ Example configuration:
- 'fr;q=0.8'
- '*;q=0.7'
```
-----
+---
### `oembed`
oEmbed allows for easier embedding content from a website. It can be
@@ -1877,7 +1878,7 @@ oembed:
- oembed/my_providers.json
```
---
-## Captcha ##
+## Captcha
See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha.
@@ -1926,7 +1927,7 @@ Example configuration:
recaptcha_siteverify_api: "https://my.recaptcha.site"
```
---
-## TURN ##
+## TURN
Options related to adding a TURN server to Synapse.
---
@@ -1947,7 +1948,7 @@ Example configuration:
```yaml
turn_shared_secret: "YOUR_SHARED_SECRET"
```
-----
+---
### `turn_username` and `turn_password`
The Username and password if the TURN server needs them and does not use a token.
@@ -2088,7 +2089,7 @@ set.
This is primarily intended for use with the `register_new_matrix_user` script
(see [Registering a user](../../setup/installation.md#registering-a-user));
-however, the interface is [documented](../admin_api/register_api.html).
+however, the interface is [documented](../../admin_api/register_api.html).
See also [`registration_shared_secret_path`](#registration_shared_secret_path).
@@ -2366,7 +2367,7 @@ Example configuration:
```yaml
session_lifetime: 24h
```
-----
+---
### `refresh_access_token_lifetime`
Time that an access token remains valid for, if the session is using refresh tokens.
@@ -2422,7 +2423,7 @@ nonrefreshable_access_token_lifetime: 24h
```
---
-## Metrics ###
+## Metrics
Config options related to metrics.
---
@@ -2436,31 +2437,6 @@ Example configuration:
enable_metrics: true
```
---
-### `enable_legacy_metrics`
-
-Set to `true` to publish both legacy and non-legacy Prometheus metric names,
-or to `false` to only publish non-legacy Prometheus metric names.
-Defaults to `true`. Has no effect if `enable_metrics` is `false`.
-**In Synapse v1.71.0, this will default to `false` before being removed in Synapse v1.73.0.**
-
-Legacy metric names include:
-- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
-- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
-
-These legacy metric names are unconventional and not compliant with OpenMetrics standards.
-They are included for backwards compatibility.
-
-Example configuration:
-```yaml
-enable_legacy_metrics: false
-```
-
-See https://github.com/matrix-org/synapse/issues/11106 for context.
-
-*Since v1.67.0.*
-
-**Will be removed in v1.73.0.**
----
### `sentry`
Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
@@ -2519,38 +2495,59 @@ Example configuration:
report_stats_endpoint: https://example.com/report-usage-stats/push
```
---
-## API Configuration ##
+## API Configuration
Config settings related to the client/server API
---
### `room_prejoin_state`
-Controls for the state that is shared with users who receive an invite
-to a room. By default, the following state event types are shared with users who
-receive invites to the room:
-- m.room.join_rules
-- m.room.canonical_alias
-- m.room.avatar
-- m.room.encryption
-- m.room.name
-- m.room.create
-- m.room.topic
+This setting controls the state that is shared with users upon receiving an
+invite to a room, or in reply to a knock on a room. By default, the following
+state events are shared with users:
+
+- `m.room.join_rules`
+- `m.room.canonical_alias`
+- `m.room.avatar`
+- `m.room.encryption`
+- `m.room.name`
+- `m.room.create`
+- `m.room.topic`
To change the default behavior, use the following sub-options:
-* `disable_default_event_types`: set to true to disable the above defaults. If this
- is enabled, only the event types listed in `additional_event_types` are shared.
- Defaults to false.
-* `additional_event_types`: Additional state event types to share with users when they are invited
- to a room. By default, this list is empty (so only the default event types are shared).
+* `disable_default_event_types`: boolean. Set to `true` to disable the above
+ defaults. If this is enabled, only the event types listed in
+ `additional_event_types` are shared. Defaults to `false`.
+* `additional_event_types`: A list of additional state events to include in the
+ events to be shared. By default, this list is empty (so only the default event
+ types are shared).
+
+ Each entry in this list should be either a single string or a list of two
+ strings.
+ * A standalone string `t` represents all events with type `t` (i.e.
+ with no restrictions on state keys).
+ * A pair of strings `[t, s]` represents a single event with type `t` and
+ state key `s`. The same type can appear in two entries with different state
+ keys: in this situation, both state keys are included in prejoin state.
Example configuration:
```yaml
room_prejoin_state:
- disable_default_event_types: true
+ disable_default_event_types: false
additional_event_types:
- - org.example.custom.event.type
- - m.room.join_rules
+ # Share all events of type `org.example.custom.event.typeA`
+ - org.example.custom.event.typeA
+ # Share only events of type `org.example.custom.event.typeB` whose
+ # state_key is "foo"
+ - ["org.example.custom.event.typeB", "foo"]
+ # Share only events of type `org.example.custom.event.typeC` whose
+ # state_key is "bar" or "baz"
+ - ["org.example.custom.event.typeC", "bar"]
+ - ["org.example.custom.event.typeC", "baz"]
```
+
+*Changed in Synapse 1.74:* admins can filter the events in prejoin state based
+on their state key.
+
---
### `track_puppeted_user_ips`
@@ -2619,7 +2616,7 @@ Example configuration:
form_secret:
```
---
-## Signing Keys ##
+## Signing Keys
Config options relating to signing keys
---
@@ -2680,6 +2677,12 @@ is still supported for backwards-compatibility, but it is deprecated.
warning on start-up. To suppress this warning, set
`suppress_key_server_warning` to true.
+If the use of a trusted key server has to be deactivated, e.g. in a private
+federation or for privacy reasons, this can be done by setting
+`trusted_key_servers` to an empty array (`trusted_key_servers: []`). Synapse will
+then request keys directly from the server that owns them. If Synapse cannot get
+a server's keys directly, events from that server will be rejected.
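+
+For example, to disable the use of trusted key servers entirely:
+```yaml
+trusted_key_servers: []
+```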
+
Options for each entry in the list include:
* `server_name`: the name of the server. Required.
* `verify_keys`: an optional map from key id to base64-encoded public key.
@@ -2728,7 +2731,7 @@ Example configuration:
key_server_signing_keys_path: "key_server_signing_keys.key"
```
---
-## Single sign-on integration ##
+## Single sign-on integration
The following settings can be used to make Synapse use a single sign-on
provider for authentication, instead of its internal password database.
@@ -2986,10 +2989,17 @@ Options for each entry include:
For the default provider, the following settings are available:
- * subject_claim: name of the claim containing a unique identifier
+ * `subject_claim`: name of the claim containing a unique identifier
for the user. Defaults to 'sub', which OpenID Connect
compliant providers should provide.
+ * `picture_claim`: name of the claim containing a URL for the user's profile picture.
+ Defaults to 'picture', which OpenID Connect compliant providers should provide.
+ The claim has to refer to a direct image file such as a PNG, JPEG, or GIF
+ (a sketch follows this list).
+
+ Currently only supported in monolithic (single-process) server configurations
+ where the media repository runs within the Synapse process.
+
* `localpart_template`: Jinja2 template for the localpart of the MXID.
If this is not set, the user will be prompted to choose their
own username (see the documentation for the `sso_auth_account_details.html`
@@ -3014,6 +3024,15 @@ Options for each entry include:
which is set to the claims returned by the UserInfo Endpoint and/or
in the ID Token.
+* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
+ Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
+ Defaults to `false`.
+
+* `backchannel_logout_ignore_sub`: by default, the OIDC Back-Channel Logout feature checks that the
+ `sub` claim matches the subject claim received during login. This check can be disabled by setting
+ this to `true`. Defaults to `false`.
+
+ You might want to disable this if the `subject_claim` returned by the mapping provider is not `sub`.
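+
+For example, here is a minimal sketch of a provider entry using the `picture_claim`
+and back-channel logout options described above. The provider details shown are
+illustrative placeholders, not values required by Synapse:
+```yaml
+oidc_providers:
+  - idp_id: my_idp            # placeholder identifier
+    idp_name: "My OIDC provider"
+    issuer: "https://idp.example.com/"     # placeholder issuer URL
+    client_id: "synapse-client-id"         # placeholder client credentials
+    client_secret: "synapse-client-secret"
+    backchannel_logout_enabled: true
+    user_mapping_provider:
+      config:
+        picture_claim: "picture"
+```
+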
It is possible to configure Synapse to only allow logins if certain attributes
match particular values in the OIDC userinfo. The requirements can be listed under
@@ -3348,7 +3367,7 @@ email:
email_validation: "[%(server_name)s] Validate your email"
```
---
-## Push ##
+## Push
Configuration settings related to push notifications
---
@@ -3357,6 +3376,10 @@ Configuration settings related to push notifications
This setting defines options for push notifications.
This option has a number of sub-options. They are as follows:
+* `enabled`: Enables or disables push notification calculation. Note that disabling this
+ will also stop unread counts being calculated for rooms. This mode of operation is
+ intended for homeservers which may only have bots or appservice users connected, or are
+ otherwise not interested in push/unread counters. Defaults to `true`.
* `include_content`: Clients requesting push notifications can either have the body of
the message sent in the notification poke along with other details
like the sender, or just the event ID and room ID (`event_id_only`).
@@ -3377,15 +3400,16 @@ This option has a number of sub-options. They are as follows:
Example configuration:
```yaml
push:
+ enabled: true
include_content: false
group_unread_count_by_room: false
```
---
-## Rooms ##
+## Rooms
Config options relating to rooms.
---
-### `encryption_enabled_by_default`
+### `encryption_enabled_by_default_for_room_type`
Controls whether locally-created rooms should be end-to-end encrypted by
default.
@@ -3422,7 +3446,7 @@ This option has the following sub-options:
NB. If you set this to true, and the last time the user_directory search
indexes were (re)built was before Synapse 1.44, you'll have to
rebuild the indexes in order to search through all known users.
-
+
These indexes are built the first time Synapse starts; admins can
manually trigger a rebuild via the API following the instructions
[for running background updates](../administration/admin_api/background_updates.md#run),
@@ -3541,9 +3565,9 @@ Example configuration:
enable_room_list_search: false
```
---
-### `alias_creation`
+### `alias_creation_rules`
-The `alias_creation` option controls who is allowed to create aliases
+The `alias_creation_rules` option controls who is allowed to create aliases
on this server.
The format of this option is a list of rules that contain globs that
@@ -3627,7 +3651,7 @@ default_power_level_content_override:
```
---
-## Opentracing ##
+## Opentracing
Configuration options related to Opentracing support.
---
@@ -3670,14 +3694,78 @@ opentracing:
false
```
---
-## Workers ##
-Configuration options related to workers.
+## Coordinating workers
+Configuration options related to workers which belong in the main config file
+(usually called `homeserver.yaml`).
+A Synapse deployment can scale horizontally by running multiple Synapse processes
+called _workers_. Incoming requests are distributed between workers to handle higher
+loads. Some workers are privileged and can accept requests from other workers.
+
+As a result, the worker configuration is divided into two parts.
+
+1. The first part (in this section of the manual) defines which shardable tasks
+ are delegated to privileged workers. This allows unprivileged workers to make
+ requests to a privileged worker to act on their behalf.
+1. [The second part](#individual-worker-configuration)
+ controls the behaviour of individual workers in isolation.
+
+For guidance on setting up workers, see the [worker documentation](../../workers.md).
+
+---
+### `worker_replication_secret`
+
+A shared secret used by the replication APIs on the main process to authenticate
+HTTP requests from workers.
+
+By default this option is unset (equivalently `null`), which means that
+traffic between the workers and the main process is not authenticated.
+
+Example configuration:
+```yaml
+worker_replication_secret: "secret_secret"
+```
+---
+### `start_pushers`
+
+Unnecessary to set if using [`pusher_instances`](#pusher_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).
+
+Controls sending of push notifications on the main process. Set to `false`
+if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.
+
+Example configuration:
+```yaml
+start_pushers: false
+```
+---
+### `pusher_instances`
+
+It is possible to scale the processes that handle sending push notifications to [sygnal](https://github.com/matrix-org/sygnal)
+and email by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to
+a `pusher_instances` map. Doing so will remove handling of this function from the main
+process. Multiple workers can be added to this map, in which case the work is balanced
+across them. Ensure the main process and all pusher workers are restarted after changing
+this option.
+
+Example configuration for a single worker:
+```yaml
+pusher_instances:
+ - pusher_worker1
+```
+And for multiple workers:
+```yaml
+pusher_instances:
+ - pusher_worker1
+ - pusher_worker2
+```
---
### `send_federation`
+Unnecessary to set if using [`federation_sender_instances`](#federation_sender_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).
+
Controls sending of outbound federation transactions on the main process.
-Set to false if using a federation sender worker. Defaults to true.
+Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
+Defaults to `true`.
Example configuration:
```yaml
@@ -3686,24 +3774,37 @@ send_federation: false
---
### `federation_sender_instances`
-It is possible to run multiple federation sender workers, in which case the
-work is balanced across them. Use this setting to list the senders.
+It is possible to scale the processes that handle sending outbound federation requests
+by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to
+a `federation_sender_instances` map. Doing so will remove handling of this function from
+the main process. Multiple workers can be added to this map, in which case the work is
+balanced across them.
-This configuration setting must be shared between all federation sender workers, and if
-changed all federation sender workers must be stopped at the same time and then
-started, to ensure that all instances are running with the same config (otherwise
+This configuration setting must be shared between all workers handling federation
+sending, and if changed all federation sender workers must be stopped at the same time
+and then started, to ensure that all instances are running with the same config (otherwise
events may be dropped).
-Example configuration:
+Example configuration for a single worker:
```yaml
federation_sender_instances:
- federation_sender1
```
+And for multiple workers:
+```yaml
+federation_sender_instances:
+ - federation_sender1
+ - federation_sender2
+```
---
### `instance_map`
-When using workers this should be a map from worker name to the
+When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
+Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
+an HTTP replication listener, and that listener should be included in the `instance_map`.
+(The main process also needs an HTTP replication listener, but it should not be
+listed in the `instance_map`.)
Example configuration:
```yaml
@@ -3716,8 +3817,11 @@ instance_map:
### `stream_writers`
Experimental: When using workers you can define which workers should
-handle event persistence and typing notifications. Any worker
-specified here must also be in the `instance_map`.
+handle writing to streams such as event persistence and typing notifications.
+Any worker specified here must also be in the [`instance_map`](#instance_map).
+
+See the list of available streams in the
+[worker documentation](../../workers.md#stream-writers).
Example configuration:
```yaml
@@ -3728,29 +3832,18 @@ stream_writers:
---
### `run_background_tasks_on`
-The worker that is used to run background tasks (e.g. cleaning up expired
-data). If not provided this defaults to the main process.
+The [worker](../../workers.md#background-tasks) that is used to run
+background tasks (e.g. cleaning up expired data). If not provided this
+defaults to the main process.
Example configuration:
```yaml
run_background_tasks_on: worker1
```
---
-### `worker_replication_secret`
-
-A shared secret used by the replication APIs to authenticate HTTP requests
-from workers.
-
-By default this is unused and traffic is not authenticated.
-
-Example configuration:
-```yaml
-worker_replication_secret: "secret_secret"
-```
### `redis`
-Configuration for Redis when using workers. This *must* be enabled when
-using workers (unless using old style direct TCP configuration).
+Configuration for Redis when using workers. This *must* be enabled when using workers.
This setting has the following sub-options:
* `enabled`: whether to use Redis support. Defaults to false.
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to
@@ -3765,7 +3858,143 @@ redis:
port: 6379
password:
```
-## Background Updates ##
+---
+## Individual worker configuration
+These options configure an individual worker, in its worker configuration file.
+They should not be provided when configuring the main process.
+
+Note also the configuration above for
+[coordinating a cluster of workers](#coordinating-workers).
+
+For guidance on setting up workers, see the [worker documentation](../../workers.md).
+
+---
+### `worker_app`
+
+The type of worker. The currently available worker applications are listed
+in [worker documentation](../../workers.md#available-worker-applications).
+
+The most common worker is the
+[`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker).
+
+Example configuration:
+```yaml
+worker_app: synapse.app.generic_worker
+```
+---
+### `worker_name`
+
+A unique name for the worker. The name is used to address the worker in other
+configuration options and to identify it in log files. We strongly recommend
+giving each worker a unique `worker_name`.
+
+Example configuration:
+```yaml
+worker_name: generic_worker1
+```
+---
+### `worker_replication_host`
+
+The HTTP replication endpoint that the worker should talk to on the main Synapse process.
+The main Synapse process defines this with a `replication` resource in its
+[`listeners` option](#listeners).
+
+Example configuration:
+```yaml
+worker_replication_host: 127.0.0.1
+```
+---
+### `worker_replication_http_port`
+
+The HTTP replication port that the worker should talk to on the main Synapse process.
+The main Synapse process defines this with a `replication` resource in its
+[`listeners` option](#listeners).
+
+Example configuration:
+```yaml
+worker_replication_http_port: 9093
+```
+---
+### `worker_replication_http_tls`
+
+Whether TLS should be used for talking to the HTTP replication port on the main
+Synapse process.
+The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
+has the `replication` resource enabled.
+
+**Please note:** by default, it is not safe to expose replication ports to the
+public Internet, even with TLS enabled.
+See [`worker_replication_secret`](#worker_replication_secret).
+
+Defaults to `false`.
+
+*Added in Synapse 1.72.0.*
+
+Example configuration:
+```yaml
+worker_replication_http_tls: true
+```
+---
+### `worker_listeners`
+
+A worker can handle HTTP requests. To do so, a `worker_listeners` option
+must be declared, in the same way as the [`listeners` option](#listeners)
+in the shared config.
+
+Workers declared in [`stream_writers`](#stream_writers) will need to include a
+`replication` listener here, in order to accept internal HTTP requests from
+other workers.
+
+Example configuration:
+```yaml
+worker_listeners:
+ - type: http
+ port: 8083
+ resources:
+ - names: [client, federation]
+```
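+
+For a worker listed in [`stream_writers`](#stream_writers), a minimal sketch with a
+`replication` listener might look like the following (the port number is illustrative):
+```yaml
+worker_listeners:
+  - type: http
+    port: 8034  # illustrative port for internal replication traffic
+    resources:
+      - names: [replication]
+```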
+---
+### `worker_daemonize`
+
+Specifies whether the worker should be started as a daemon process.
+If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
+must be omitted or set to `false`.
+
+Defaults to `false`.
+
+Example configuration:
+```yaml
+worker_daemonize: true
+```
+---
+### `worker_pid_file`
+
+When running a worker as a daemon, we need a place to store the
+[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
+This option defines the location of that "pid file".
+
+This option is required if `worker_daemonize` is `true` and ignored
+otherwise. It has no default.
+
+See also the [`pid_file` option](#pid_file) for the main Synapse process.
+
+Example configuration:
+```yaml
+worker_pid_file: DATADIR/generic_worker1.pid
+```
+---
+### `worker_log_config`
+
+This option specifies a yaml python logging config file as described
+[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
+See also the [`log_config` option](#log_config) for the main Synapse process.
+
+Example configuration:
+```yaml
+worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
+```
+---
+## Background Updates
Configuration settings related to background updates.
---
@@ -3794,4 +4023,3 @@ background_updates:
min_batch_size: 10
default_batch_size: 50
```
-
diff --git a/docs/workers.md b/docs/workers.md
index 27041ea57c75..59a6487e0d6a 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -88,10 +88,12 @@ shared configuration file.
### Shared configuration
Normally, only a couple of changes are needed to make an existing configuration
-file suitable for use with workers. First, you need to enable an "HTTP replication
-listener" for the main process; and secondly, you need to enable redis-based
-replication. Optionally, a shared secret can be used to authenticate HTTP
-traffic between workers. For example:
+file suitable for use with workers. First, you need to enable an
+["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
+for the main process; and secondly, you need to enable
+[redis-based replication](usage/configuration/config_documentation.md#redis).
+Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
+can be used to authenticate HTTP traffic between workers. For example:
```yaml
# extend the existing `listeners` section. This defines the ports that the
@@ -111,27 +113,30 @@ redis:
enabled: true
```
-See the [configuration manual](usage/configuration/config_documentation.html) for the full documentation of each option.
+See the [configuration manual](usage/configuration/config_documentation.md)
+for the full documentation of each option.
Under **no circumstances** should the replication listener be exposed to the
public internet; replication traffic is:
* always unencrypted
-* unauthenticated, unless `worker_replication_secret` is configured
+* unauthenticated, unless [`worker_replication_secret`](usage/configuration/config_documentation.md#worker_replication_secret)
+ is configured
### Worker configuration
In the config file for each worker, you must specify:
- * The type of worker (`worker_app`). The currently available worker applications are listed below.
- * A unique name for the worker (`worker_name`).
+ * The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
+ The currently available worker applications are listed [below](#available-worker-applications).
+ * A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
* The HTTP replication endpoint that it should talk to on the main synapse process
- (`worker_replication_host` and `worker_replication_http_port`)
- * If handling HTTP requests, a `worker_listeners` option with an `http`
- listener, in the same way as the [`listeners`](usage/configuration/config_documentation.md#listeners)
- option in the shared config.
- * If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
- the main process (`worker_main_http_uri`).
+ ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
+ [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
+ * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
+ with an `http` listener.
+ * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
+ the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
For example:
@@ -146,7 +151,6 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.
Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).
-
### Running Synapse with workers
Finally, you need to start your worker processes. This can be done with either
@@ -187,6 +191,7 @@ information.
^/_matrix/federation/(v1|v2)/send_leave/
^/_matrix/federation/(v1|v2)/invite/
^/_matrix/federation/v1/event_auth/
+ ^/_matrix/federation/v1/timestamp_to_event/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/user/devices/
^/_matrix/key/v2/query
@@ -203,6 +208,8 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
^/_matrix/client/v1/rooms/.*/hierarchy$
+ ^/_matrix/client/(v1|unstable)/rooms/.*/relations/
+ ^/_matrix/client/v1/rooms/.*/threads$
^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
@@ -212,10 +219,10 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
+ ^/_matrix/client/v1/rooms/.*/timestamp_to_event$
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
# Encryption requests
- # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
^/_matrix/client/(r0|v3|unstable)/keys/query$
^/_matrix/client/(r0|v3|unstable)/keys/changes$
^/_matrix/client/(r0|v3|unstable)/keys/claim$
@@ -286,7 +293,8 @@ For multiple workers not handling the SSO endpoints properly, see
[#9427](https://github.com/matrix-org/synapse/issues/9427).
Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners)
-with `client` and `federation` `resources` must be configured in the `worker_listeners`
+with `client` and `federation` `resources` must be configured in the
+[`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners)
option in the worker config.
#### Load balancing
@@ -298,9 +306,11 @@ may wish to run multiple groups of workers handling different endpoints so that
load balancing can be done in different ways.
For `/sync` and `/initialSync` requests it will be more efficient if all
-requests from a particular user are routed to a single instance. Extracting a
-user ID from the access token or `Authorization` header is currently left as an
-exercise for the reader. Admins may additionally wish to separate out `/sync`
+requests from a particular user are routed to a single instance. This can
+be done, e.g. in nginx, via `hash $http_x_forwarded_for;` (hashing on the client's
+IP address) or via `hash $http_authorization consistent;` (hashing on the
+`Authorization` header, which contains the user's access token).
+
+Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
`/initialSync`), as requests that don't are known as "initial sync" that happens
when a user logs in on a new device and can be *very* resource intensive, so
@@ -329,9 +339,10 @@ of the main process to a particular worker.
To enable this, the worker must have a
[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
-have a `worker_name` and be listed in the `instance_map` config. The same worker
-can handle multiple streams, but unless otherwise documented, each stream can only
-have a single writer.
+have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
+and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
+config. The same worker can handle multiple streams, but unless otherwise documented,
+each stream can only have a single writer.
For example, to move event persistence off to a dedicated worker, the shared
configuration would include:
@@ -358,9 +369,26 @@ streams and the endpoints associated with them:
##### The `events` stream
-The `events` stream experimentally supports having multiple writers, where work
-is sharded between them by room ID. Note that you *must* restart all worker
-instances when adding or removing event persisters. An example `stream_writers`
+The `events` stream experimentally supports having multiple writer workers, where load
+is sharded between them by room ID. Each writer is called an _event persister_. They are
+responsible for
+- receiving new events,
+- linking them to those already in the room [DAG](development/room-dag-concepts.md),
+- persisting them to the DB, and finally
+- updating the events stream.
+
+Because load is sharded in this way, you *must* restart all worker instances when
+adding or removing event persisters.
+
+An `event_persister` should not be mistaken for an `event_creator`.
+An `event_creator` listens for requests from clients to create new events and
+creates them. It then passes those events over HTTP replication to any configured
+event persisters (or the main process if none are configured).
+
+Note that `event_creator`s and `event_persister`s are implemented using the same
+[`synapse.app.generic_worker`](#synapseappgeneric_worker).
+
+An example [`stream_writers`](usage/configuration/config_documentation.md#stream_writers)
configuration with multiple writers:
```yaml
@@ -414,16 +442,18 @@ worker. Background tasks are run periodically or started via replication. Exactl
which tasks are configured to run depends on your Synapse configuration (e.g. if
stats is enabled). This worker doesn't handle any REST endpoints itself.
-To enable this, the worker must have a `worker_name` and can be configured to run
-background tasks. For example, to move background tasks to a dedicated worker,
-the shared configuration would include:
+To enable this, the worker must have a unique
+[`worker_name`](usage/configuration/config_documentation.md#worker_name)
+and can be configured to run background tasks. For example, to move background tasks
+to a dedicated worker, the shared configuration would include:
```yaml
run_background_tasks_on: background_worker
```
-You might also wish to investigate the `update_user_directory_from_worker` and
-`media_instance_running_background_jobs` settings.
+You might also wish to investigate the
+[`update_user_directory_from_worker`](#updating-the-user-directory) and
+[`media_instance_running_background_jobs`](#synapseappmedia_repository) settings.
An example for a dedicated background worker instance:
@@ -475,14 +505,21 @@ worker application type.
### `synapse.app.pusher`
+It is likely this option will be deprecated in the future and is not recommended for new
+installations. Instead, [use `synapse.app.generic_worker` with the `pusher_instances`](usage/configuration/config_documentation.md#pusher_instances).
+
Handles sending push notifications to sygnal and email. Doesn't handle any
-REST endpoints itself, but you should set `start_pushers: False` in the
+REST endpoints itself, but you should set
+[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
shared configuration file to stop the main synapse sending push notifications.
-To run multiple instances at once the `pusher_instances` option should list all
-pusher instances by their worker name, e.g.:
+To run multiple instances at once the
+[`pusher_instances`](usage/configuration/config_documentation.md#pusher_instances)
+option should list all pusher instances by their
+[`worker_name`](usage/configuration/config_documentation.md#worker_name), e.g.:
```yaml
+start_pushers: false
pusher_instances:
- pusher_worker1
- pusher_worker2
@@ -509,16 +546,24 @@ Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.federation_sender`
+It is likely this option will be deprecated in the future and is not recommended for
+new installations. Instead, [use `synapse.app.generic_worker` with the `federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances).
+
Handles sending federation traffic to other servers. Doesn't handle any
-REST endpoints itself, but you should set `send_federation: False` in the
-shared configuration file to stop the main synapse sending this traffic.
+REST endpoints itself, but you should set
+[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
+in the shared configuration file to stop the main synapse sending this traffic.
If running multiple federation senders then you must list each
-instance in the `federation_sender_instances` option by their `worker_name`.
+instance in the
+[`federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances)
+option by their
+[`worker_name`](usage/configuration/config_documentation.md#worker_name).
All instances must be stopped and started when adding or removing instances.
For example:
```yaml
+send_federation: false
federation_sender_instances:
- federation_sender1
- federation_sender2
@@ -545,7 +590,9 @@ Handles the media repository. It can handle all endpoints starting with:
^/_synapse/admin/v1/quarantine_media/.*$
^/_synapse/admin/v1/users/.*/media$
-You should also set `enable_media_repo: False` in the shared configuration
+You should also set
+[`enable_media_repo: False`](usage/configuration/config_documentation.md#enable_media_repo)
+in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository. Note that doing so will prevent the main process from being
able to handle the above endpoints.
@@ -598,7 +645,9 @@ equivalent to `synapse.app.generic_worker`:
* `synapse.app.client_reader`
* `synapse.app.event_creator`
* `synapse.app.federation_reader`
+ * `synapse.app.federation_sender`
* `synapse.app.frontend_proxy`
+ * `synapse.app.pusher`
* `synapse.app.synchrotron`
diff --git a/mypy.ini b/mypy.ini
index 34b4523e007e..37acf589c90f 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -11,6 +11,8 @@ warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
+strict_equality = True
+warn_redundant_casts = True
files =
docker/,
@@ -56,24 +58,8 @@ exclude = (?x)
|tests/rest/media/v1/test_media_storage.py
|tests/server.py
|tests/server_notices/test_resource_limits_server_notices.py
- |tests/test_metrics.py
|tests/test_state.py
|tests/test_terms_auth.py
- |tests/util/caches/test_cached_call.py
- |tests/util/caches/test_deferred_cache.py
- |tests/util/caches/test_descriptors.py
- |tests/util/caches/test_response_cache.py
- |tests/util/caches/test_ttlcache.py
- |tests/util/test_async_helpers.py
- |tests/util/test_batching_queue.py
- |tests/util/test_dict_cache.py
- |tests/util/test_expiring_cache.py
- |tests/util/test_file_consumer.py
- |tests/util/test_linearizer.py
- |tests/util/test_logcontext.py
- |tests/util/test_lrucache.py
- |tests/util/test_rwlock.py
- |tests/util/test_wheel_timer.py
)$
[mypy-synapse.federation.transport.client]
@@ -103,33 +89,50 @@ disallow_untyped_defs = False
[mypy-tests.*]
disallow_untyped_defs = False
+[mypy-tests.config.test_api]
+disallow_untyped_defs = True
+
+[mypy-tests.federation.transport.test_client]
+disallow_untyped_defs = True
+
+[mypy-tests.handlers.test_sso]
+disallow_untyped_defs = True
+
[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True
+[mypy-tests.metrics.test_background_process_metrics]
+disallow_untyped_defs = True
+
[mypy-tests.push.test_bulk_push_rule_evaluator]
disallow_untyped_defs = True
-[mypy-tests.test_server]
+[mypy-tests.rest.*]
disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True
-[mypy-tests.storage.test_profile]
+[mypy-tests.storage.*]
disallow_untyped_defs = True
-[mypy-tests.storage.test_user_directory]
+[mypy-tests.test_server]
disallow_untyped_defs = True
-[mypy-tests.rest.*]
+[mypy-tests.types.*]
disallow_untyped_defs = True
-[mypy-tests.federation.transport.test_client]
+[mypy-tests.util.caches.*]
disallow_untyped_defs = True
-[mypy-tests.utils]
+[mypy-tests.util.caches.test_descriptors]
+disallow_untyped_defs = False
+
+[mypy-tests.util.*]
disallow_untyped_defs = True
+[mypy-tests.utils]
+disallow_untyped_defs = True
;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
diff --git a/poetry.lock b/poetry.lock
index a20fe4410513..8040944b8ceb 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,20 +1,20 @@
[[package]]
name = "attrs"
-version = "21.4.0"
+version = "22.1.0"
description = "Classes Without Boilerplate"
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.5"
[package.extras]
-dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "sphinx", "sphinx-notfound-page", "zope.interface"]
+dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"]
docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"]
-tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "zope.interface"]
-tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six"]
+tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"]
+tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]
[[package]]
name = "Authlib"
-version = "1.1.0"
+version = "1.2.0"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
category = "main"
optional = true
@@ -24,8 +24,8 @@ python-versions = "*"
cryptography = ">=3.2"
[[package]]
-name = "automat"
-version = "20.2.0"
+name = "Automat"
+version = "22.10.0"
description = "Self-service finite-state machines for the programmer on the go."
category = "main"
optional = false
@@ -40,34 +40,30 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
[[package]]
name = "bcrypt"
-version = "3.2.0"
+version = "4.0.1"
description = "Modern password hashing for your software and your servers"
category = "main"
optional = false
python-versions = ">=3.6"
-[package.dependencies]
-cffi = ">=1.1"
-six = ">=1.4.1"
-
[package.extras]
tests = ["pytest (>=3.2.1,!=3.3.0)"]
typecheck = ["mypy"]
[[package]]
name = "black"
-version = "22.3.0"
+version = "22.12.0"
description = "The uncompromising code formatter."
category = "dev"
optional = false
-python-versions = ">=3.6.2"
+python-versions = ">=3.7"
[package.dependencies]
click = ">=8.0.0"
mypy-extensions = ">=0.4.3"
pathspec = ">=0.9.0"
platformdirs = ">=2"
-tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""}
typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
@@ -79,20 +75,23 @@ uvloop = ["uvloop (>=0.15.2)"]
[[package]]
name = "bleach"
-version = "4.1.0"
+version = "5.0.1"
description = "An easy safelist-based HTML-sanitizing tool."
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
-packaging = "*"
six = ">=1.9.0"
webencodings = "*"
+[package.extras]
+css = ["tinycss2 (>=1.1.0,<1.2)"]
+dev = ["Sphinx (==4.3.2)", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "hashin (==0.17.0)", "mypy (==0.961)", "pip-tools (==6.6.2)", "pytest (==7.1.2)", "tox (==3.25.0)", "twine (==4.0.1)", "wheel (==0.37.1)"]
+
[[package]]
name = "canonicaljson"
-version = "1.6.3"
+version = "1.6.4"
description = "Canonical JSON"
category = "main"
optional = false
@@ -107,15 +106,15 @@ frozendict = ["frozendict (>=1.0)"]
[[package]]
name = "certifi"
-version = "2021.10.8"
+version = "2022.12.7"
description = "Python package for providing Mozilla's CA Bundle."
category = "main"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
[[package]]
name = "cffi"
-version = "1.15.0"
+version = "1.15.1"
description = "Foreign Function Interface for Python calling C code."
category = "main"
optional = false
@@ -126,18 +125,18 @@ pycparser = "*"
[[package]]
name = "charset-normalizer"
-version = "2.0.12"
+version = "2.1.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
category = "main"
optional = false
-python-versions = ">=3.5.0"
+python-versions = ">=3.6.0"
[package.extras]
unicode_backport = ["unicodedata2"]
[[package]]
name = "click"
-version = "8.1.1"
+version = "8.1.3"
description = "Composable command line interface toolkit"
category = "dev"
optional = false
@@ -160,11 +159,11 @@ click = "*"
[[package]]
name = "colorama"
-version = "0.4.4"
+version = "0.4.6"
description = "Cross-platform colored terminal text."
category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
[[package]]
name = "commonmark"
@@ -187,7 +186,7 @@ python-versions = "*"
[[package]]
name = "cryptography"
-version = "36.0.1"
+version = "38.0.4"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
category = "main"
optional = false
@@ -197,12 +196,12 @@ python-versions = ">=3.6"
cffi = ">=1.12"
[package.extras]
-docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx_rtd_theme"]
+docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"]
docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"]
-sdist = ["setuptools_rust (>=0.11.4)"]
+sdist = ["setuptools-rust (>=0.11.4)"]
ssh = ["bcrypt (>=3.1.5)"]
-test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]
+test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]
[[package]]
name = "defusedxml"
@@ -213,7 +212,7 @@ optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[[package]]
-name = "deprecated"
+name = "Deprecated"
version = "1.2.13"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
category = "dev"
@@ -228,55 +227,55 @@ dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version
[[package]]
name = "docutils"
-version = "0.18.1"
+version = "0.19"
description = "Docutils -- Python Documentation Utilities"
category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.7"
[[package]]
name = "elementpath"
-version = "2.5.0"
-description = "XPath 1.0/2.0 parsers and selectors for ElementTree and lxml"
+version = "3.0.2"
+description = "XPath 1.0/2.0/3.0 parsers and selectors for ElementTree and lxml"
category = "main"
optional = true
python-versions = ">=3.7"
[package.extras]
-dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910)", "tox", "xmlschema (>=1.8.0)"]
+dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", "memray", "mypy (==0.971)", "tox", "xmlschema (>=2.0.0)"]
[[package]]
name = "flake8"
-version = "4.0.1"
+version = "5.0.4"
description = "the modular source code checker: pep8 pyflakes and co"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.6.1"
[package.dependencies]
-importlib-metadata = {version = "<4.3", markers = "python_version < \"3.8\""}
-mccabe = ">=0.6.0,<0.7.0"
-pycodestyle = ">=2.8.0,<2.9.0"
-pyflakes = ">=2.4.0,<2.5.0"
+importlib-metadata = {version = ">=1.1.0,<4.3", markers = "python_version < \"3.8\""}
+mccabe = ">=0.7.0,<0.8.0"
+pycodestyle = ">=2.9.0,<2.10.0"
+pyflakes = ">=2.5.0,<2.6.0"
[[package]]
name = "flake8-bugbear"
-version = "21.3.2"
+version = "22.12.6"
description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle."
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
attrs = ">=19.2.0"
flake8 = ">=3.0.0"
[package.extras]
-dev = ["black", "coverage", "hypothesis", "hypothesmith"]
+dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "tox"]
[[package]]
name = "flake8-comprehensions"
-version = "3.8.0"
+version = "3.10.1"
description = "A flake8 plugin to help you write better list/set/dict comprehensions."
category = "dev"
optional = false
@@ -296,18 +295,18 @@ python-versions = ">=3.6"
[[package]]
name = "gitdb"
-version = "4.0.9"
+version = "4.0.10"
description = "Git Object Database"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
smmap = ">=3.0.1,<6"
[[package]]
-name = "gitpython"
-version = "3.1.27"
+name = "GitPython"
+version = "3.1.29"
description = "GitPython is a python library used to interact with Git repositories"
category = "dev"
optional = false
@@ -319,11 +318,11 @@ typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""
[[package]]
name = "hiredis"
-version = "2.0.0"
+version = "2.1.0"
description = "Python wrapper for hiredis"
category = "main"
optional = true
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[[package]]
name = "hyperlink"
@@ -338,7 +337,7 @@ idna = ">=2.5"
[[package]]
name = "idna"
-version = "3.3"
+version = "3.4"
description = "Internationalized Domain Names in Applications (IDNA)"
category = "main"
optional = false
@@ -370,43 +369,44 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pep517",
[[package]]
name = "importlib-resources"
-version = "5.4.0"
+version = "5.10.1"
description = "Read resources from Python packages"
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
[package.extras]
-docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
-testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
[[package]]
name = "incremental"
-version = "21.3.0"
-description = "A small library that versions your Python projects."
+version = "22.10.0"
+description = "\"A small library that versions your Python projects.\""
category = "main"
optional = false
python-versions = "*"
[package.extras]
+mypy = ["click (>=6.0)", "mypy (==0.812)", "twisted (>=16.4.0)"]
scripts = ["click (>=6.0)", "twisted (>=16.4.0)"]
[[package]]
name = "isort"
-version = "5.10.1"
+version = "5.11.2"
description = "A Python utility / library to sort Python imports."
category = "dev"
optional = false
-python-versions = ">=3.6.1,<4.0"
+python-versions = ">=3.7.0"
[package.extras]
colors = ["colorama (>=0.4.3,<0.5.0)"]
-pipfile_deprecated_finder = ["pipreqs", "requirementslib"]
+pipfile-deprecated-finder = ["pipreqs", "requirementslib"]
plugins = ["setuptools"]
-requirements_deprecated_finder = ["pip-api", "pipreqs"]
+requirements-deprecated-finder = ["pip-api", "pipreqs"]
[[package]]
name = "jaeger-client"
@@ -425,25 +425,40 @@ tornado = ">=4.3"
[package.extras]
tests = ["codecov", "coverage", "flake8", "flake8-quotes", "flake8-typing-imports", "mock", "mypy", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "pycurl", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "pytest-localserver", "pytest-timeout", "pytest-tornado", "tchannel (==2.1.0)"]
+[[package]]
+name = "jaraco.classes"
+version = "3.2.3"
+description = "Utility functions for Python class constructs"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+more-itertools = "*"
+
+[package.extras]
+docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+
[[package]]
name = "jeepney"
-version = "0.7.1"
+version = "0.8.0"
description = "Low-level, pure Python DBus protocol wrapper."
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.extras]
-test = ["async-timeout", "pytest", "pytest-asyncio", "pytest-trio", "testpath", "trio"]
+test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"]
trio = ["async_generator", "trio"]
[[package]]
-name = "jinja2"
-version = "3.0.3"
+name = "Jinja2"
+version = "3.1.2"
description = "A very fast and expressive template engine."
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
MarkupSafe = ">=2.0"
@@ -453,7 +468,7 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
-version = "4.16.0"
+version = "4.17.3"
description = "An implementation of JSON Schema validation for Python"
category = "main"
optional = false
@@ -473,21 +488,22 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-
[[package]]
name = "keyring"
-version = "23.5.0"
+version = "23.9.3"
description = "Store and access your passwords safely."
category = "dev"
optional = false
python-versions = ">=3.7"
[package.dependencies]
-importlib-metadata = ">=3.6"
+importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""}
+"jaraco.classes" = "*"
jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""}
pywin32-ctypes = {version = "<0.1.0 || >0.1.0,<0.1.1 || >0.1.1", markers = "sys_platform == \"win32\""}
SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""}
[package.extras]
-docs = ["jaraco.packaging (>=8.2)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"]
-testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
+docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
[[package]]
name = "ldap3"
@@ -502,7 +518,7 @@ pyasn1 = ">=0.4.6"
[[package]]
name = "lxml"
-version = "4.9.1"
+version = "4.9.2"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
category = "main"
optional = true
@@ -515,8 +531,8 @@ htmlsoup = ["BeautifulSoup4"]
source = ["Cython (>=0.29.7)"]
[[package]]
-name = "markupsafe"
-version = "2.1.0"
+name = "MarkupSafe"
+version = "2.1.1"
description = "Safely add untrusted strings to HTML/XML markup."
category = "main"
optional = false
@@ -556,11 +572,19 @@ dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "ma
[[package]]
name = "mccabe"
-version = "0.6.1"
+version = "0.7.0"
description = "McCabe checker, plugin for flake8"
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
+
+[[package]]
+name = "more-itertools"
+version = "9.0.0"
+description = "More routines for operating on iterables, beyond itertools"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
[[package]]
name = "msgpack"
@@ -634,14 +658,11 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte
[[package]]
name = "packaging"
-version = "21.3"
+version = "22.0"
description = "Core utilities for Python packages"
category = "main"
optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
+python-versions = ">=3.7"
[[package]]
name = "parameterized"
@@ -656,38 +677,42 @@ dev = ["jinja2"]
[[package]]
name = "pathspec"
-version = "0.9.0"
+version = "0.10.3"
description = "Utility library for gitignore style pattern matching of file paths."
category = "dev"
optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+python-versions = ">=3.7"
[[package]]
name = "phonenumbers"
-version = "8.12.44"
+version = "8.13.2"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
category = "main"
optional = false
python-versions = "*"
[[package]]
-name = "pillow"
-version = "9.0.1"
+name = "Pillow"
+version = "9.3.0"
description = "Python Imaging Library (Fork)"
category = "main"
optional = false
python-versions = ">=3.7"
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+
[[package]]
name = "pkginfo"
-version = "1.8.2"
+version = "1.9.2"
description = "Query metadatdata from sdists / bdists / installed packages."
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
[package.extras]
-testing = ["coverage", "nose"]
+testing = ["pytest", "pytest-cov"]
[[package]]
name = "pkgutil_resolve_name"
@@ -699,19 +724,19 @@ python-versions = ">=3.6"
[[package]]
name = "platformdirs"
-version = "2.5.1"
-description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+version = "2.6.0"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
category = "dev"
optional = false
python-versions = ">=3.7"
[package.extras]
-docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"]
-test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"]
+docs = ["furo (>=2022.9.29)", "proselint (>=0.13)", "sphinx (>=5.3)", "sphinx-autodoc-typehints (>=1.19.4)"]
+test = ["appdirs (==1.4.4)", "pytest (>=7.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"]
[[package]]
name = "prometheus-client"
-version = "0.14.0"
+version = "0.15.0"
description = "Python client for the Prometheus monitoring system."
category = "main"
optional = false
@@ -722,7 +747,7 @@ twisted = ["twisted"]
[[package]]
name = "psycopg2"
-version = "2.9.3"
+version = "2.9.5"
description = "psycopg2 - Python-PostgreSQL Database Adapter"
category = "main"
optional = true
@@ -772,11 +797,11 @@ pyasn1 = ">=0.4.6,<0.5.0"
[[package]]
name = "pycodestyle"
-version = "2.8.0"
+version = "2.9.1"
description = "Python style guide checker"
category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.6"
[[package]]
name = "pycparser"
@@ -788,14 +813,14 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
[[package]]
name = "pydantic"
-version = "1.9.1"
+version = "1.10.2"
description = "Data validation and settings management using python type hints"
category = "main"
optional = false
-python-versions = ">=3.6.1"
+python-versions = ">=3.7"
[package.dependencies]
-typing-extensions = ">=3.7.4.3"
+typing-extensions = ">=4.1.0"
[package.extras]
dotenv = ["python-dotenv (>=0.10.4)"]
@@ -803,23 +828,23 @@ email = ["email-validator (>=1.0.3)"]
[[package]]
name = "pyflakes"
-version = "2.4.0"
+version = "2.5.0"
description = "passive checker of Python programs"
category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+python-versions = ">=3.6"
[[package]]
-name = "pygithub"
-version = "1.55"
+name = "PyGithub"
+version = "1.57"
description = "Use the full Github API v3"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
deprecated = "*"
-pyjwt = ">=2.0"
+pyjwt = ">=2.4.0"
pynacl = ">=1.4.0"
requests = ">=2.14.0"
@@ -827,25 +852,36 @@ requests = ">=2.14.0"
integrations = ["cryptography"]
[[package]]
-name = "pygments"
-version = "2.11.2"
+name = "Pygments"
+version = "2.13.0"
description = "Pygments is a syntax highlighting package written in Python."
category = "dev"
optional = false
-python-versions = ">=3.5"
+python-versions = ">=3.6"
+
+[package.extras]
+plugins = ["importlib-metadata"]
[[package]]
-name = "pyjwt"
-version = "2.4.0"
+name = "PyICU"
+version = "2.10.2"
+description = "Python extension wrapping the ICU C++ API"
+category = "main"
+optional = true
+python-versions = "*"
+
+[[package]]
+name = "PyJWT"
+version = "2.6.0"
description = "JSON Web Token implementation in Python"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.extras]
-crypto = ["cryptography (>=3.3.1)"]
-dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.3.1)", "mypy", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"]
-docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
+crypto = ["cryptography (>=3.4.0)"]
+dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"]
+docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
[[package]]
@@ -861,7 +897,7 @@ PyNaCl = ">=1.1.2,<2.0"
six = ">=1.8.0"
[[package]]
-name = "pympler"
+name = "Pympler"
version = "1.0.1"
description = "A development tool to measure, monitor and analyze the memory behavior of Python objects."
category = "main"
@@ -869,7 +905,7 @@ optional = true
python-versions = ">=3.6"
[[package]]
-name = "pynacl"
+name = "PyNaCl"
version = "1.5.0"
description = "Python binding to the Networking and Cryptography (NaCl) library"
category = "main"
@@ -884,34 +920,23 @@ docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"]
tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
-name = "pyopenssl"
-version = "22.0.0"
+name = "pyOpenSSL"
+version = "22.1.0"
description = "Python wrapper module around the OpenSSL library"
category = "main"
optional = false
python-versions = ">=3.6"
[package.dependencies]
-cryptography = ">=35.0"
+cryptography = ">=38.0.0,<39"
[package.extras]
-docs = ["sphinx", "sphinx-rtd-theme"]
+docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"]
test = ["flaky", "pretend", "pytest (>=3.0.1)"]
-[[package]]
-name = "pyparsing"
-version = "3.0.7"
-description = "Python parsing module"
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[package.extras]
-diagrams = ["jinja2", "railroad-diagrams"]
-
[[package]]
name = "pyrsistent"
-version = "0.18.1"
+version = "0.19.2"
description = "Persistent/Functional/Immutable data structures"
category = "main"
optional = false
@@ -919,14 +944,14 @@ python-versions = ">=3.7"
[[package]]
name = "pysaml2"
-version = "7.1.2"
+version = "7.2.1"
description = "Python implementation of SAML Version 2 Standard"
category = "main"
optional = true
python-versions = "<4,>=3.6"
[package.dependencies]
-cryptography = ">=1.4"
+cryptography = ">=3.1"
defusedxml = "*"
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
pyOpenSSL = "*"
@@ -953,7 +978,7 @@ six = ">=1.5"
[[package]]
name = "pytz"
-version = "2021.3"
+version = "2022.6"
description = "World timezone definitions, modern and historical"
category = "main"
optional = true
@@ -968,7 +993,7 @@ optional = false
python-versions = "*"
[[package]]
-name = "pyyaml"
+name = "PyYAML"
version = "6.0"
description = "YAML parser and emitter for Python"
category = "main"
@@ -977,11 +1002,11 @@ python-versions = ">=3.6"
[[package]]
name = "readme-renderer"
-version = "33.0"
+version = "37.3"
description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
bleach = ">=2.1.0"
@@ -993,29 +1018,29 @@ md = ["cmarkgfm (>=0.8.0)"]
[[package]]
name = "requests"
-version = "2.27.1"
+version = "2.28.1"
description = "Python HTTP for Humans."
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+python-versions = ">=3.7, <4"
[package.dependencies]
certifi = ">=2017.4.17"
-charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""}
-idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""}
+charset-normalizer = ">=2,<3"
+idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<1.27"
[package.extras]
-socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"]
-use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "requests-toolbelt"
-version = "0.9.1"
+version = "0.10.1"
description = "A utility belt for advanced users of python-requests"
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
[package.dependencies]
requests = ">=2.0.1,<3.0.0"
@@ -1032,8 +1057,24 @@ python-versions = ">=3.7"
idna2008 = ["idna"]
[[package]]
-name = "secretstorage"
-version = "3.3.1"
+name = "rich"
+version = "12.6.0"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+category = "dev"
+optional = false
+python-versions = ">=3.6.3,<4.0.0"
+
+[package.dependencies]
+commonmark = ">=0.9.0,<0.10.0"
+pygments = ">=2.6.0,<3.0.0"
+typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"]
+
+[[package]]
+name = "SecretStorage"
+version = "3.3.3"
description = "Python bindings to FreeDesktop.org Secret Service API"
category = "dev"
optional = false
@@ -1057,7 +1098,7 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "1.5.11"
+version = "1.11.1"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
@@ -1065,7 +1106,7 @@ python-versions = "*"
[package.dependencies]
certifi = "*"
-urllib3 = ">=1.10.0"
+urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""}
[package.extras]
aiohttp = ["aiohttp (>=3.5)"]
@@ -1075,14 +1116,17 @@ celery = ["celery (>=3)"]
chalice = ["chalice (>=1.16.0)"]
django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
+fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)"]
httpx = ["httpx (>=0.16.0)"]
pure_eval = ["asttokens", "executing", "pure-eval"]
+pymongo = ["pymongo (>=3.1)"]
pyspark = ["pyspark (>=2.4.4)"]
quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
rq = ["rq (>=0.6)"]
sanic = ["sanic (>=0.8)"]
sqlalchemy = ["sqlalchemy (>=1.2)"]
+starlette = ["starlette (>=0.19.1)"]
tornado = ["tornado (>=5)"]
[[package]]
@@ -1108,20 +1152,20 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"]
[[package]]
name = "setuptools"
-version = "65.3.0"
+version = "65.6.3"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
category = "main"
optional = false
python-versions = ">=3.7"
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
-testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mock", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
[[package]]
name = "setuptools-rust"
-version = "1.5.1"
+version = "1.5.2"
description = "Setuptools Rust extension plugin"
category = "main"
optional = false
@@ -1152,7 +1196,7 @@ dev = ["typing-extensions (>=3.5)"]
[[package]]
name = "simplejson"
-version = "3.17.6"
+version = "3.18.0"
description = "Simple, fast, extensible JSON encoder/decoder for Python"
category = "main"
optional = false
@@ -1203,7 +1247,7 @@ tornado = "*"
[[package]]
name = "thrift"
-version = "0.15.0"
+version = "0.16.0"
description = "Python bindings for the Apache Thrift RPC system"
category = "main"
optional = true
@@ -1219,27 +1263,27 @@ twisted = ["twisted"]
[[package]]
name = "tomli"
-version = "1.2.3"
+version = "2.0.1"
description = "A lil' TOML parser"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[[package]]
name = "tornado"
-version = "6.1"
+version = "6.2"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
category = "main"
optional = true
-python-versions = ">= 3.5"
+python-versions = ">= 3.7"
[[package]]
name = "towncrier"
-version = "21.9.0"
+version = "22.8.0"
description = "Building newsfiles for your project."
category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=3.7"
[package.dependencies]
click = "*"
@@ -1247,27 +1291,11 @@ click-default-group = "*"
incremental = "*"
jinja2 = "*"
setuptools = "*"
-tomli = {version = "*", markers = "python_version >= \"3.6\""}
+tomli = "*"
[package.extras]
dev = ["packaging"]
-[[package]]
-name = "tqdm"
-version = "4.63.0"
-description = "Fast, Extensible Progress Meter"
-category = "dev"
-optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
-
-[package.dependencies]
-colorama = {version = "*", markers = "platform_system == \"Windows\""}
-
-[package.extras]
-dev = ["py-make (>=0.1.0)", "twine", "wheel"]
-notebook = ["ipywidgets (>=6)"]
-telegram = ["requests"]
-
[[package]]
name = "treq"
version = "22.2.0"
@@ -1289,31 +1317,30 @@ docs = ["sphinx (>=1.4.8)"]
[[package]]
name = "twine"
-version = "3.8.0"
+version = "4.0.2"
description = "Collection of utilities for publishing packages on PyPI"
category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[package.dependencies]
-colorama = ">=0.4.3"
importlib-metadata = ">=3.6"
keyring = ">=15.1"
pkginfo = ">=1.8.1"
-readme-renderer = ">=21.0"
+readme-renderer = ">=35.0"
requests = ">=2.20"
requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0"
rfc3986 = ">=1.4.0"
-tqdm = ">=4.14"
+rich = ">=12.0.0"
urllib3 = ">=1.26.0"
[[package]]
-name = "twisted"
-version = "22.4.0"
+name = "Twisted"
+version = "22.10.0"
description = "An asynchronous networking framework written in Python"
category = "main"
optional = false
-python-versions = ">=3.6.7"
+python-versions = ">=3.7.1"
[package.dependencies]
attrs = ">=19.2.0"
@@ -1322,27 +1349,28 @@ constantly = ">=15.1"
hyperlink = ">=17.1.1"
idna = {version = ">=2.4", optional = true, markers = "extra == \"tls\""}
incremental = ">=21.3.0"
-pyopenssl = {version = ">=16.0.0", optional = true, markers = "extra == \"tls\""}
+pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""}
service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""}
twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""}
typing-extensions = ">=3.6.5"
"zope.interface" = ">=4.4.2"
[package.extras]
-all_non_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+all_non_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
conch_nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
contextvars = ["contextvars (>=2.4,<3)"]
-dev = ["coverage (>=6b1,<7)", "pydoctor (>=21.9.0,<21.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)"]
-dev_release = ["pydoctor (>=21.9.0,<21.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)"]
+dev = ["coverage (>=6b1,<7)", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)"]
+dev_release = ["pydoctor (>=22.9.0,<22.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)"]
+gtk_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pygobject", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
-macos_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
-mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=21.9.0,<21.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
-osx_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+macos_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=22.9.0,<22.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=5.0,<6)", "sphinx-rtd-theme (>=1.0,<2.0)", "towncrier (>=22.8,<23.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
+osx_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
-test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)"]
-tls = ["idna (>=2.4)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)"]
-windows_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
+test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.0,<7.0)"]
+tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"]
+windows_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.0,<7.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
[[package]]
name = "twisted-iocpsupport"
@@ -1366,7 +1394,7 @@ twisted = "*"
[[package]]
name = "typed-ast"
-version = "1.5.2"
+version = "1.5.4"
description = "a fork of Python 2 and 3 ast modules with type comment support"
category = "dev"
optional = false
@@ -1374,7 +1402,7 @@ python-versions = ">=3.6"
[[package]]
name = "types-bleach"
-version = "4.1.4"
+version = "5.0.3.1"
description = "Typing stubs for bleach"
category = "dev"
optional = false
@@ -1390,35 +1418,15 @@ python-versions = "*"
[[package]]
name = "types-cryptography"
-version = "3.3.15"
+version = "3.3.23.2"
description = "Typing stubs for cryptography"
category = "dev"
optional = false
python-versions = "*"
-[package.dependencies]
-types-enum34 = "*"
-types-ipaddress = "*"
-
-[[package]]
-name = "types-enum34"
-version = "1.1.8"
-description = "Typing stubs for enum34"
-category = "dev"
-optional = false
-python-versions = "*"
-
-[[package]]
-name = "types-ipaddress"
-version = "1.0.8"
-description = "Typing stubs for ipaddress"
-category = "dev"
-optional = false
-python-versions = "*"
-
[[package]]
name = "types-jsonschema"
-version = "4.4.6"
+version = "4.17.0.2"
description = "Typing stubs for jsonschema"
category = "dev"
optional = false
@@ -1426,15 +1434,15 @@ python-versions = "*"
[[package]]
name = "types-opentracing"
-version = "2.4.7"
+version = "2.4.10"
description = "Typing stubs for opentracing"
category = "dev"
optional = false
python-versions = "*"
[[package]]
-name = "types-pillow"
-version = "9.0.15"
+name = "types-Pillow"
+version = "9.3.0.4"
description = "Typing stubs for Pillow"
category = "dev"
optional = false
@@ -1442,7 +1450,7 @@ python-versions = "*"
[[package]]
name = "types-psycopg2"
-version = "2.9.9"
+version = "2.9.21.2"
description = "Typing stubs for psycopg2"
category = "dev"
optional = false
@@ -1450,7 +1458,7 @@ python-versions = "*"
[[package]]
name = "types-pyOpenSSL"
-version = "22.0.10"
+version = "22.1.0.2"
description = "Typing stubs for pyOpenSSL"
category = "dev"
optional = false
@@ -1461,7 +1469,7 @@ types-cryptography = "*"
[[package]]
name = "types-PyYAML"
-version = "6.0.12"
+version = "6.0.12.2"
description = "Typing stubs for PyYAML"
category = "dev"
optional = false
@@ -1469,7 +1477,7 @@ python-versions = "*"
[[package]]
name = "types-requests"
-version = "2.28.11"
+version = "2.28.11.5"
description = "Typing stubs for requests"
category = "dev"
optional = false
@@ -1480,7 +1488,7 @@ types-urllib3 = "<1.27"
[[package]]
name = "types-setuptools"
-version = "65.4.0.0"
+version = "65.6.0.2"
description = "Typing stubs for setuptools"
category = "dev"
optional = false
@@ -1488,7 +1496,7 @@ python-versions = "*"
[[package]]
name = "types-urllib3"
-version = "1.26.10"
+version = "1.26.25.4"
description = "Typing stubs for urllib3"
category = "dev"
optional = false
@@ -1496,11 +1504,11 @@ python-versions = "*"
[[package]]
name = "typing-extensions"
-version = "4.1.1"
-description = "Backported and Experimental Type Hints for Python 3.6+"
+version = "4.4.0"
+description = "Backported and Experimental Type Hints for Python 3.7+"
category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
[[package]]
name = "unpaddedbase64"
@@ -1512,23 +1520,23 @@ python-versions = ">=3.6,<4.0"
[[package]]
name = "urllib3"
-version = "1.26.8"
+version = "1.26.13"
description = "HTTP library with thread-safe connection pooling, file post, and more."
category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
[package.extras]
-brotli = ["brotlipy (>=0.6.0)"]
-secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)"]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
name = "uvloop"
version = "0.17.0"
description = "Fast implementation of asyncio event loop on top of libuv"
-category = "dev"
-optional = false
+category = "main"
+optional = true
python-versions = ">=3.7"
[package.extras]
@@ -1546,7 +1554,7 @@ python-versions = "*"
[[package]]
name = "wrapt"
-version = "1.13.3"
+version = "1.14.1"
description = "Module for decorators, wrappers and monkey patching."
category = "dev"
optional = false
@@ -1554,31 +1562,31 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
[[package]]
name = "xmlschema"
-version = "1.10.0"
+version = "2.1.1"
description = "An XML Schema validator and decoder"
category = "main"
optional = true
python-versions = ">=3.7"
[package.dependencies]
-elementpath = ">=2.5.0,<3.0.0"
+elementpath = ">=3.0.0,<4.0.0"
[package.extras]
-codegen = ["elementpath (>=2.5.0,<3.0.0)", "jinja2"]
-dev = ["Sphinx", "coverage", "elementpath (>=2.5.0,<3.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"]
-docs = ["Sphinx", "elementpath (>=2.5.0,<3.0.0)", "jinja2", "sphinx-rtd-theme"]
+codegen = ["elementpath (>=3.0.0,<4.0.0)", "jinja2"]
+dev = ["Sphinx", "coverage", "elementpath (>=3.0.0,<4.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"]
+docs = ["Sphinx", "elementpath (>=3.0.0,<4.0.0)", "jinja2", "sphinx-rtd-theme"]
[[package]]
name = "zipp"
-version = "3.7.0"
+version = "3.11.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
category = "main"
optional = false
python-versions = ">=3.7"
[package.extras]
-docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
-testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"]
+testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
[[package]]
name = "zope.event"
@@ -1597,7 +1605,7 @@ test = ["zope.testrunner"]
[[package]]
name = "zope.interface"
-version = "5.4.0"
+version = "5.5.2"
description = "Interfaces for Python"
category = "main"
optional = false
@@ -1613,7 +1621,7 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
[[package]]
name = "zope.schema"
-version = "6.2.0"
+version = "6.2.1"
description = "zope.interface extension for defining data schemas"
category = "dev"
optional = false
@@ -1629,8 +1637,8 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
[extras]
-all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler"]
-cache_memory = ["Pympler"]
+all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler", "pyicu", "uvloop"]
+cache-memory = ["Pympler"]
jwt = ["authlib"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
oidc = ["authlib"]
@@ -1641,141 +1649,157 @@ saml2 = ["pysaml2"]
sentry = ["sentry-sdk"]
systemd = ["systemd-python"]
test = ["parameterized", "idna"]
-url_preview = ["lxml"]
+url-preview = ["lxml"]
+user-search = ["pyicu"]
+uvloop = ["uvloop"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
-content-hash = "d5e09dd26ae2f64ba4a9defe9d9cae6fb895ac56790c2fbf58be88b67d08e1a5"
+content-hash = "288ca485f750dbacfe32dcde419c5275b364a5ea87919d1fcd46a45776666ef4"
[metadata.files]
attrs = [
- {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"},
- {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"},
+ {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"},
+ {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"},
]
Authlib = [
- {file = "Authlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:be4b6a1dea51122336c210a6945b27a105b9ac572baffd15b07bcff4376c1523"},
- {file = "Authlib-1.1.0.tar.gz", hash = "sha256:0a270c91409fc2b7b0fbee6996e09f2ee3187358762111a9a4225c874b94e891"},
+ {file = "Authlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:4ddf4fd6cfa75c9a460b361d4bd9dac71ffda0be879dbe4292a02e92349ad55a"},
+ {file = "Authlib-1.2.0.tar.gz", hash = "sha256:4fa3e80883a5915ef9f5bc28630564bc4ed5b5af39812a3ff130ec76bd631e9d"},
]
-automat = [
- {file = "Automat-20.2.0-py2.py3-none-any.whl", hash = "sha256:b6feb6455337df834f6c9962d6ccf771515b7d939bca142b29c20c2376bc6111"},
- {file = "Automat-20.2.0.tar.gz", hash = "sha256:7979803c74610e11ef0c0d68a2942b152df52da55336e0c9d58daf1831cbdf33"},
+Automat = [
+ {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"},
+ {file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"},
]
bcrypt = [
- {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b589229207630484aefe5899122fb938a5b017b0f4349f769b8c13e78d99a8fd"},
- {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux1_x86_64.whl", hash = "sha256:63d4e3ff96188e5898779b6057878fecf3f11cfe6ec3b313ea09955d587ec7a7"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux2010_x86_64.whl", hash = "sha256:cd1ea2ff3038509ea95f687256c46b79f5fc382ad0aa3664d200047546d511d1"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux2014_aarch64.whl", hash = "sha256:cdcdcb3972027f83fe24a48b1e90ea4b584d35f1cc279d76de6fc4b13376239d"},
- {file = "bcrypt-3.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a0584a92329210fcd75eb8a3250c5a941633f8bfaf2a18f81009b097732839b7"},
- {file = "bcrypt-3.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:56e5da069a76470679f312a7d3d23deb3ac4519991a0361abc11da837087b61d"},
- {file = "bcrypt-3.2.0-cp36-abi3-win32.whl", hash = "sha256:a67fb841b35c28a59cebed05fbd3e80eea26e6d75851f0574a9273c80f3e9b55"},
- {file = "bcrypt-3.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:81fec756feff5b6818ea7ab031205e1d323d8943d237303baca2c5f9c7846f34"},
- {file = "bcrypt-3.2.0.tar.gz", hash = "sha256:5b93c1726e50a93a033c36e5ca7fdcd29a5c7395af50a6892f5d9e7c6cfbfb29"},
+ {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3"},
+ {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2"},
+ {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535"},
+ {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e"},
+ {file = "bcrypt-4.0.1-cp36-abi3-win32.whl", hash = "sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab"},
+ {file = "bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9"},
+ {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c"},
+ {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b"},
+ {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d"},
+ {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d"},
+ {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215"},
+ {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c"},
+ {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda"},
+ {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665"},
+ {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71"},
+ {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"},
]
black = [
- {file = "black-22.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2497f9c2386572e28921fa8bec7be3e51de6801f7459dffd6e62492531c47e09"},
- {file = "black-22.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5795a0375eb87bfe902e80e0c8cfaedf8af4d49694d69161e5bd3206c18618bb"},
- {file = "black-22.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3556168e2e5c49629f7b0f377070240bd5511e45e25a4497bb0073d9dda776a"},
- {file = "black-22.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67c8301ec94e3bcc8906740fe071391bce40a862b7be0b86fb5382beefecd968"},
- {file = "black-22.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:fd57160949179ec517d32ac2ac898b5f20d68ed1a9c977346efbac9c2f1e779d"},
- {file = "black-22.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc1e1de68c8e5444e8f94c3670bb48a2beef0e91dddfd4fcc29595ebd90bb9ce"},
- {file = "black-22.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2fc92002d44746d3e7db7cf9313cf4452f43e9ea77a2c939defce3b10b5c82"},
- {file = "black-22.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:a6342964b43a99dbc72f72812bf88cad8f0217ae9acb47c0d4f141a6416d2d7b"},
- {file = "black-22.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:328efc0cc70ccb23429d6be184a15ce613f676bdfc85e5fe8ea2a9354b4e9015"},
- {file = "black-22.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06f9d8846f2340dfac80ceb20200ea5d1b3f181dd0556b47af4e8e0b24fa0a6b"},
- {file = "black-22.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4efa5fad66b903b4a5f96d91461d90b9507a812b3c5de657d544215bb7877a"},
- {file = "black-22.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8477ec6bbfe0312c128e74644ac8a02ca06bcdb8982d4ee06f209be28cdf163"},
- {file = "black-22.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:637a4014c63fbf42a692d22b55d8ad6968a946b4a6ebc385c5505d9625b6a464"},
- {file = "black-22.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:863714200ada56cbc366dc9ae5291ceb936573155f8bf8e9de92aef51f3ad0f0"},
- {file = "black-22.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10dbe6e6d2988049b4655b2b739f98785a884d4d6b85bc35133a8fb9a2233176"},
- {file = "black-22.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:cee3e11161dde1b2a33a904b850b0899e0424cc331b7295f2a9698e79f9a69a0"},
- {file = "black-22.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5891ef8abc06576985de8fa88e95ab70641de6c1fca97e2a15820a9b69e51b20"},
- {file = "black-22.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:30d78ba6bf080eeaf0b7b875d924b15cd46fec5fd044ddfbad38c8ea9171043a"},
- {file = "black-22.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee8f1f7228cce7dffc2b464f07ce769f478968bfb3dd1254a4c2eeed84928aad"},
- {file = "black-22.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee227b696ca60dd1c507be80a6bc849a5a6ab57ac7352aad1ffec9e8b805f21"},
- {file = "black-22.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:9b542ced1ec0ceeff5b37d69838106a6348e60db7b8fdd245294dc1d26136265"},
- {file = "black-22.3.0-py3-none-any.whl", hash = "sha256:bc58025940a896d7e5356952228b68f793cf5fcb342be703c3a2669a1488cb72"},
- {file = "black-22.3.0.tar.gz", hash = "sha256:35020b8886c022ced9282b51b5a875b6d1ab0c387b31a065b84db7c33085ca79"},
+ {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"},
+ {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"},
+ {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"},
+ {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"},
+ {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"},
+ {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"},
+ {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"},
+ {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"},
+ {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"},
+ {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"},
+ {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"},
+ {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"},
]
bleach = [
- {file = "bleach-4.1.0-py2.py3-none-any.whl", hash = "sha256:4d2651ab93271d1129ac9cbc679f524565cc8a1b791909c4a51eac4446a15994"},
- {file = "bleach-4.1.0.tar.gz", hash = "sha256:0900d8b37eba61a802ee40ac0061f8c2b5dee29c1927dd1d233e075ebf5a71da"},
+ {file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"},
+ {file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"},
]
canonicaljson = [
- {file = "canonicaljson-1.6.3-py3-none-any.whl", hash = "sha256:6ba3cf1702fa3d209b3e915a4e9a3e4ef194f1e8fca189c1f0b7a2a7686a27e6"},
- {file = "canonicaljson-1.6.3.tar.gz", hash = "sha256:ca59760bc274a899a0da75809d6909ae43e5123381fd6ef040a44d1952c0b448"},
+ {file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"},
+ {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"},
]
certifi = [
- {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
- {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"},
+ {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"},
+ {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"},
]
cffi = [
- {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"},
- {file = "cffi-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0"},
- {file = "cffi-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14"},
- {file = "cffi-1.15.0-cp27-cp27m-win32.whl", hash = "sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474"},
- {file = "cffi-1.15.0-cp27-cp27m-win_amd64.whl", hash = "sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6"},
- {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27"},
- {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023"},
- {file = "cffi-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2"},
- {file = "cffi-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e"},
- {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7"},
- {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3"},
- {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c"},
- {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962"},
- {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382"},
- {file = "cffi-1.15.0-cp310-cp310-win32.whl", hash = "sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55"},
- {file = "cffi-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0"},
- {file = "cffi-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e"},
- {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39"},
- {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc"},
- {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032"},
- {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8"},
- {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605"},
- {file = "cffi-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e"},
- {file = "cffi-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc"},
- {file = "cffi-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636"},
- {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4"},
- {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997"},
- {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b"},
- {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2"},
- {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7"},
- {file = "cffi-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66"},
- {file = "cffi-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029"},
- {file = "cffi-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880"},
- {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20"},
- {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024"},
- {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e"},
- {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728"},
- {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6"},
- {file = "cffi-1.15.0-cp38-cp38-win32.whl", hash = "sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c"},
- {file = "cffi-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443"},
- {file = "cffi-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a"},
- {file = "cffi-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37"},
- {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a"},
- {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e"},
- {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796"},
- {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df"},
- {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8"},
- {file = "cffi-1.15.0-cp39-cp39-win32.whl", hash = "sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a"},
- {file = "cffi-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139"},
- {file = "cffi-1.15.0.tar.gz", hash = "sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954"},
+ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"},
+ {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"},
+ {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"},
+ {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"},
+ {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"},
+ {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"},
+ {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"},
+ {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"},
+ {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"},
+ {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"},
+ {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"},
+ {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"},
+ {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"},
+ {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"},
+ {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"},
+ {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"},
+ {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"},
+ {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"},
+ {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"},
]
charset-normalizer = [
- {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"},
- {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"},
+ {file = "charset-normalizer-2.1.1.tar.gz", hash = "sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845"},
+ {file = "charset_normalizer-2.1.1-py3-none-any.whl", hash = "sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"},
]
click = [
- {file = "click-8.1.1-py3-none-any.whl", hash = "sha256:5e0d195c2067da3136efb897449ec1e9e6c98282fbf30d7f9e164af9be901a6b"},
- {file = "click-8.1.1.tar.gz", hash = "sha256:7ab900e38149c9872376e8f9b5986ddcaf68c0f413cf73678a0bca5547e6f976"},
+ {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
+ {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
]
click-default-group = [
{file = "click-default-group-1.2.2.tar.gz", hash = "sha256:d9560e8e8dfa44b3562fbc9425042a0fd6d21956fcc2db0077f63f34253ab904"},
]
colorama = [
- {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
- {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"},
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
commonmark = [
{file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"},
@@ -1786,54 +1810,60 @@ constantly = [
{file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"},
]
cryptography = [
- {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:73bc2d3f2444bcfeac67dd130ff2ea598ea5f20b40e36d19821b4df8c9c5037b"},
- {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:2d87cdcb378d3cfed944dac30596da1968f88fb96d7fc34fdae30a99054b2e31"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74d6c7e80609c0f4c2434b97b80c7f8fdfaa072ca4baab7e239a15d6d70ed73a"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:6c0c021f35b421ebf5976abf2daacc47e235f8b6082d3396a2fe3ccd537ab173"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59a9d55027a8b88fd9fd2826c4392bd487d74bf628bb9d39beecc62a644c12"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a817b961b46894c5ca8a66b599c745b9a3d9f822725221f0e0fe49dc043a3a3"},
- {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:94ae132f0e40fe48f310bba63f477f14a43116f05ddb69d6fa31e93f05848ae2"},
- {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7be0eec337359c155df191d6ae00a5e8bbb63933883f4f5dffc439dac5348c3f"},
- {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e0344c14c9cb89e76eb6a060e67980c9e35b3f36691e15e1b7a9e58a0a6c6dc3"},
- {file = "cryptography-36.0.1-cp36-abi3-win32.whl", hash = "sha256:4caa4b893d8fad33cf1964d3e51842cd78ba87401ab1d2e44556826df849a8ca"},
- {file = "cryptography-36.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:391432971a66cfaf94b21c24ab465a4cc3e8bf4a939c1ca5c3e3a6e0abebdbcf"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bb5829d027ff82aa872d76158919045a7c1e91fbf241aec32cb07956e9ebd3c9"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc15b1c22e55c4d5566e3ca4db8689470a0ca2babef8e3a9ee057a8b82ce4b1"},
- {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:596f3cd67e1b950bc372c33f1a28a0692080625592ea6392987dba7f09f17a94"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:30ee1eb3ebe1644d1c3f183d115a8c04e4e603ed6ce8e394ed39eea4a98469ac"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec63da4e7e4a5f924b90af42eddf20b698a70e58d86a72d943857c4c6045b3ee"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca238ceb7ba0bdf6ce88c1b74a87bffcee5afbfa1e41e173b1ceb095b39add46"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:ca28641954f767f9822c24e927ad894d45d5a1e501767599647259cbf030b903"},
- {file = "cryptography-36.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:39bdf8e70eee6b1c7b289ec6e5d84d49a6bfa11f8b8646b5b3dfe41219153316"},
- {file = "cryptography-36.0.1.tar.gz", hash = "sha256:53e5c1dc3d7a953de055d77bef2ff607ceef7a2aac0353b5d630ab67f7423638"},
+ {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70"},
+ {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c"},
+ {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00"},
+ {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0"},
+ {file = "cryptography-38.0.4-cp36-abi3-win32.whl", hash = "sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744"},
+ {file = "cryptography-38.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d"},
+ {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353"},
+ {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7"},
+ {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd"},
+ {file = "cryptography-38.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2"},
+ {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b"},
+ {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6"},
+ {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876"},
+ {file = "cryptography-38.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b"},
+ {file = "cryptography-38.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285"},
+ {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b"},
+ {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083"},
+ {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee"},
+ {file = "cryptography-38.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9"},
+ {file = "cryptography-38.0.4.tar.gz", hash = "sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290"},
]
defusedxml = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
]
-deprecated = [
+Deprecated = [
{file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"},
{file = "Deprecated-1.2.13.tar.gz", hash = "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d"},
]
docutils = [
- {file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"},
- {file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"},
+ {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"},
+ {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"},
]
elementpath = [
- {file = "elementpath-2.5.0-py3-none-any.whl", hash = "sha256:2a432775e37a19e4362443078130a7dbfc457d7d093cd421c03958d9034cc08b"},
- {file = "elementpath-2.5.0.tar.gz", hash = "sha256:3a27aaf3399929fccda013899cb76d3ff111734abf4281e5f9d3721ba0b9ffa3"},
+ {file = "elementpath-3.0.2-py3-none-any.whl", hash = "sha256:6122419481a4c73101918714274b2cec907feecd04a44b623b4bae4292853328"},
+ {file = "elementpath-3.0.2.tar.gz", hash = "sha256:cca18742dc0f354f79874c41a906e6ce4cc15230b7858d22a861e1ec5946940f"},
]
flake8 = [
- {file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"},
- {file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"},
+ {file = "flake8-5.0.4-py2.py3-none-any.whl", hash = "sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248"},
+ {file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"},
]
flake8-bugbear = [
- {file = "flake8-bugbear-21.3.2.tar.gz", hash = "sha256:cadce434ceef96463b45a7c3000f23527c04ea4b531d16c7ac8886051f516ca0"},
- {file = "flake8_bugbear-21.3.2-py36.py37.py38-none-any.whl", hash = "sha256:5d6ccb0c0676c738a6e066b4d50589c408dcc1c5bf1d73b464b18b73cd6c05c2"},
+ {file = "flake8-bugbear-22.12.6.tar.gz", hash = "sha256:4cdb2c06e229971104443ae293e75e64c6107798229202fbe4f4091427a30ac0"},
+ {file = "flake8_bugbear-22.12.6-py3-none-any.whl", hash = "sha256:b69a510634f8a9c298dfda2b18a8036455e6b19ecac4fe582e4d7a0abfa50a30"},
]
flake8-comprehensions = [
- {file = "flake8-comprehensions-3.8.0.tar.gz", hash = "sha256:8e108707637b1d13734f38e03435984f6b7854fa6b5a4e34f93e69534be8e521"},
- {file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"},
+ {file = "flake8-comprehensions-3.10.1.tar.gz", hash = "sha256:412052ac4a947f36b891143430fef4859705af11b2572fbb689f90d372cf26ab"},
+ {file = "flake8_comprehensions-3.10.1-py3-none-any.whl", hash = "sha256:d763de3c74bc18a79c039a7ec732e0a1985b0c79309ceb51e56401ad0a2cd44e"},
]
frozendict = [
{file = "frozendict-2.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a3b32d47282ae0098b9239a6d53ec539da720258bd762d62191b46f2f87c5fc"},
@@ -1855,63 +1885,110 @@ frozendict = [
{file = "frozendict-2.3.4.tar.gz", hash = "sha256:15b4b18346259392b0d27598f240e9390fafbff882137a9c48a1e0104fb17f78"},
]
gitdb = [
- {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"},
- {file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"},
+ {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"},
+ {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"},
]
-gitpython = [
- {file = "GitPython-3.1.27-py3-none-any.whl", hash = "sha256:5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d"},
- {file = "GitPython-3.1.27.tar.gz", hash = "sha256:1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704"},
+GitPython = [
+ {file = "GitPython-3.1.29-py3-none-any.whl", hash = "sha256:41eea0deec2deea139b459ac03656f0dd28fc4a3387240ec1d3c259a2c47850f"},
+ {file = "GitPython-3.1.29.tar.gz", hash = "sha256:cc36bfc4a3f913e66805a28e84703e419d9c264c1077e537b54f0e1af85dbefd"},
]
hiredis = [
- {file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"},
- {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0adea425b764a08270820531ec2218d0508f8ae15a448568109ffcae050fee26"},
- {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:3d55e36715ff06cdc0ab62f9591607c4324297b6b6ce5b58cb9928b3defe30ea"},
- {file = "hiredis-2.0.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:5d2a48c80cf5a338d58aae3c16872f4d452345e18350143b3bf7216d33ba7b99"},
- {file = "hiredis-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:240ce6dc19835971f38caf94b5738092cb1e641f8150a9ef9251b7825506cb05"},
- {file = "hiredis-2.0.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:5dc7a94bb11096bc4bffd41a3c4f2b958257085c01522aa81140c68b8bf1630a"},
- {file = "hiredis-2.0.0-cp36-cp36m-win32.whl", hash = "sha256:139705ce59d94eef2ceae9fd2ad58710b02aee91e7fa0ccb485665ca0ecbec63"},
- {file = "hiredis-2.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:c39c46d9e44447181cd502a35aad2bb178dbf1b1f86cf4db639d7b9614f837c6"},
- {file = "hiredis-2.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:adf4dd19d8875ac147bf926c727215a0faf21490b22c053db464e0bf0deb0485"},
- {file = "hiredis-2.0.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0f41827028901814c709e744060843c77e78a3aca1e0d6875d2562372fcb405a"},
- {file = "hiredis-2.0.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:508999bec4422e646b05c95c598b64bdbef1edf0d2b715450a078ba21b385bcc"},
- {file = "hiredis-2.0.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:0d5109337e1db373a892fdcf78eb145ffb6bbd66bb51989ec36117b9f7f9b579"},
- {file = "hiredis-2.0.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:04026461eae67fdefa1949b7332e488224eac9e8f2b5c58c98b54d29af22093e"},
- {file = "hiredis-2.0.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:a00514362df15af041cc06e97aebabf2895e0a7c42c83c21894be12b84402d79"},
- {file = "hiredis-2.0.0-cp37-cp37m-win32.whl", hash = "sha256:09004096e953d7ebd508cded79f6b21e05dff5d7361771f59269425108e703bc"},
- {file = "hiredis-2.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f8196f739092a78e4f6b1b2172679ed3343c39c61a3e9d722ce6fcf1dac2824a"},
- {file = "hiredis-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:294a6697dfa41a8cba4c365dd3715abc54d29a86a40ec6405d677ca853307cfb"},
- {file = "hiredis-2.0.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3dddf681284fe16d047d3ad37415b2e9ccdc6c8986c8062dbe51ab9a358b50a5"},
- {file = "hiredis-2.0.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:dcef843f8de4e2ff5e35e96ec2a4abbdf403bd0f732ead127bd27e51f38ac298"},
- {file = "hiredis-2.0.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:87c7c10d186f1743a8fd6a971ab6525d60abd5d5d200f31e073cd5e94d7e7a9d"},
- {file = "hiredis-2.0.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:7f0055f1809b911ab347a25d786deff5e10e9cf083c3c3fd2dd04e8612e8d9db"},
- {file = "hiredis-2.0.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:11d119507bb54e81f375e638225a2c057dda748f2b1deef05c2b1a5d42686048"},
- {file = "hiredis-2.0.0-cp38-cp38-win32.whl", hash = "sha256:7492af15f71f75ee93d2a618ca53fea8be85e7b625e323315169977fae752426"},
- {file = "hiredis-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:65d653df249a2f95673976e4e9dd7ce10de61cfc6e64fa7eeaa6891a9559c581"},
- {file = "hiredis-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae8427a5e9062ba66fc2c62fb19a72276cf12c780e8db2b0956ea909c48acff5"},
- {file = "hiredis-2.0.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:3f5f7e3a4ab824e3de1e1700f05ad76ee465f5f11f5db61c4b297ec29e692b2e"},
- {file = "hiredis-2.0.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e3447d9e074abf0e3cd85aef8131e01ab93f9f0e86654db7ac8a3f73c63706ce"},
- {file = "hiredis-2.0.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:8b42c0dc927b8d7c0eb59f97e6e34408e53bc489f9f90e66e568f329bff3e443"},
- {file = "hiredis-2.0.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:b84f29971f0ad4adaee391c6364e6f780d5aae7e9226d41964b26b49376071d0"},
- {file = "hiredis-2.0.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:0b39ec237459922c6544d071cdcf92cbb5bc6685a30e7c6d985d8a3e3a75326e"},
- {file = "hiredis-2.0.0-cp39-cp39-win32.whl", hash = "sha256:a7928283143a401e72a4fad43ecc85b35c27ae699cf5d54d39e1e72d97460e1d"},
- {file = "hiredis-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:a4ee8000454ad4486fb9f28b0cab7fa1cd796fc36d639882d0b34109b5b3aec9"},
- {file = "hiredis-2.0.0-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1f03d4dadd595f7a69a75709bc81902673fa31964c75f93af74feac2f134cc54"},
- {file = "hiredis-2.0.0-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:04927a4c651a0e9ec11c68e4427d917e44ff101f761cd3b5bc76f86aaa431d27"},
- {file = "hiredis-2.0.0-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:a39efc3ade8c1fb27c097fd112baf09d7fd70b8cb10ef1de4da6efbe066d381d"},
- {file = "hiredis-2.0.0-pp36-pypy36_pp73-win32.whl", hash = "sha256:07bbf9bdcb82239f319b1f09e8ef4bdfaec50ed7d7ea51a56438f39193271163"},
- {file = "hiredis-2.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:807b3096205c7cec861c8803a6738e33ed86c9aae76cac0e19454245a6bbbc0a"},
- {file = "hiredis-2.0.0-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:1233e303645f468e399ec906b6b48ab7cd8391aae2d08daadbb5cad6ace4bd87"},
- {file = "hiredis-2.0.0-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:cb2126603091902767d96bcb74093bd8b14982f41809f85c9b96e519c7e1dc41"},
- {file = "hiredis-2.0.0-pp37-pypy37_pp73-win32.whl", hash = "sha256:f52010e0a44e3d8530437e7da38d11fb822acfb0d5b12e9cd5ba655509937ca0"},
- {file = "hiredis-2.0.0.tar.gz", hash = "sha256:81d6d8e39695f2c37954d1011c0480ef7cf444d4e3ae24bc5e89ee5de360139a"},
+ {file = "hiredis-2.1.0-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:7b339a7542a3f6a10b3bbc157e4abc9bae9628e2df7faf5f8a32f730014719ae"},
+ {file = "hiredis-2.1.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:dd82370c2f9f804ec617b95d25edb0fd04882251afb2ecdf08b9ced0c3aa4bcc"},
+ {file = "hiredis-2.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:92077511d3a62109d5d11bf584e41264a993ae3c77c72de63c1f741b7809bacb"},
+ {file = "hiredis-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6544c7807cbb75bc6ae9ab85773b4413edbcd55342e9e3d7d3f159f677f7428"},
+ {file = "hiredis-2.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8181d73f25943fbdca904154e51b845317103cee08116cfae258f96927ce1e74"},
+ {file = "hiredis-2.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040f861e4e43daa9287f3a85979542f9c7ee8cfab695fa662f3b6186c6f7d5e8"},
+ {file = "hiredis-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef5ae8c1af82a8000742003cb16a6fa6c57919abb861ab214dcb27db8573ee64"},
+ {file = "hiredis-2.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9b9aa1b0ec46dec5b05dcec22e50bbd4af33da121fca83bd2601dc60c79183f9"},
+ {file = "hiredis-2.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c53c36a630a6c6fd9dfe439f4266e564ca58995015a780c1d964567ebf328466"},
+ {file = "hiredis-2.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:05aab35210bd7fbd7bd066efb2a42eb5c2878c2c137a9cff597204be2c07475b"},
+ {file = "hiredis-2.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e6097e1cef647c665f71cd0e58346389580db98365e804f7a9ad5d96e66b7150"},
+ {file = "hiredis-2.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:32f98370efed38088d000df2eb2c8ed43d93d99bbf4a0a740e15eb4a887cc23f"},
+ {file = "hiredis-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b85276ed57e0aee8910b48383a38a299851935ba134460bad394988c750985fe"},
+ {file = "hiredis-2.1.0-cp310-cp310-win32.whl", hash = "sha256:bd9d99606008a8cfa6b9e950abaa35f5b87496f03e63b73197d02b0fe7ecb6d3"},
+ {file = "hiredis-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:6a8e796c94b7b8c63c99757d6ec2075069e4c362dfb0f130aaf874422bea3e7d"},
+ {file = "hiredis-2.1.0-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:e7bb5cab604fc45b45cee40e84e84d9e30eeb34c571a3784392ae658273bbd23"},
+ {file = "hiredis-2.1.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:e0d4b074ff5ebba00933da27a06f3752b8af2448a6aa9dc895d5279f43011530"},
+ {file = "hiredis-2.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f0c2dbaffd4a9e8df04731a012c8a67b7517abec7e53bb12c3cd749865c63428"},
+ {file = "hiredis-2.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c19151e79b36e0d849899a21fc10539aa1903af94b31754bddab1bea876cd508"},
+ {file = "hiredis-2.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08ec41519a533f5cd1f1f8bd1797929358117c8e4570b679b469f768b45b7dbf"},
+ {file = "hiredis-2.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f0db3667fa8abbd37ac66385b460841029033bfc1ba8d7e5b3ff1e01d3346a"},
+ {file = "hiredis-2.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f592d1522b5981890b34b0b814f4bfa4a68b23ee90f538aac321d17e8bf859c8"},
+ {file = "hiredis-2.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dddd2be67de25a62b3bf871f091181c13da3b32186d4be6af49dadbf6fdc266d"},
+ {file = "hiredis-2.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4ee8f6d0774cd6179c625688201e961a2d03da212230adaa2193cfb7a04f9169"},
+ {file = "hiredis-2.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5000942ffb6b6410ccbc87089c15fde5f48bd205664ee8b3067e6b2fb5689485"},
+ {file = "hiredis-2.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:21e0017b8f50abd13b4c4c4218c7dfd5a42623e3255b460dfa5f70b45c4e7c3e"},
+ {file = "hiredis-2.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:40b55fb46fcc78b04190176c0ae28bfa3cc7f418fca9df06c037028af5942b6a"},
+ {file = "hiredis-2.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:24a55169a7f0bd9458935ac644bf8191f127c8aa50cdd70c0b87928cc515cae5"},
+ {file = "hiredis-2.1.0-cp311-cp311-win32.whl", hash = "sha256:bb60f79e8c1eb5971b10fd256764ea0c89c4ad2d55ac4379981f678f349411f2"},
+ {file = "hiredis-2.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:b223668844f26034759a6c24a72f0bb8e4fb64a43b27e2f3e8378639eaac1661"},
+ {file = "hiredis-2.1.0-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:7f7e7d91d6533fcb1939d467cf8bfb98640edf715897959f31ae83f5ad29aed3"},
+ {file = "hiredis-2.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:531d1d3955244831b69272b993e16f93489ce2dadfdf800ac856dc2d9a43d353"},
+ {file = "hiredis-2.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66ffcbfc4db52dd87cdfd53bda45881ab3ab07c80ec43244fd8d70ee69d42c01"},
+ {file = "hiredis-2.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:023b3b3ac410d6cfdb45ee943b8c528c90379f31419a1fd229888aa2b965732d"},
+ {file = "hiredis-2.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c972385a0647120d4b0fe0e9567257cad7b2577b9f1315815713c571af0e778d"},
+ {file = "hiredis-2.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32893825426e73d57b3290b68110dd76229945e6c79b08a37795f536501935c4"},
+ {file = "hiredis-2.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:262148f9b616c0cdd0f2c6bda45cd0f1ce6ce2d1974efd296b85b44e5c7567c2"},
+ {file = "hiredis-2.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d601c27b9599fe52cade3096351f92f665e527d29af8d3e29353a76bfcf5615"},
+ {file = "hiredis-2.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:d248acc7d7713c1b3d48ed8ea67d6ba43b104aa67d63078846a3590adbab6b73"},
+ {file = "hiredis-2.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:969ffe37a8980a6e5404993ccfe605a40fa6732fa6d7b26a1a718c9121197002"},
+ {file = "hiredis-2.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:288d5d0566d3cbcd800e46c7a547428d321842898b8c7de037a7e78b5644e88a"},
+ {file = "hiredis-2.1.0-cp37-cp37m-win32.whl", hash = "sha256:06cb776d3cd3cbec86010f1bab6895ee16af8036aae8c3594a5e96c24f0f83a5"},
+ {file = "hiredis-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:6766376dc43ef186113422ecacec0ece0d4b12c0e5f4b556669e639b20ccabb1"},
+ {file = "hiredis-2.1.0-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:41afba30304adcbe1c93fc8272a7169b7fc4e4d3d470ad8babd391678a519d76"},
+ {file = "hiredis-2.1.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:6df0115f8b0766cd3d12416e2e2e914efed5b1a1a27605c9f37bc92de086877a"},
+ {file = "hiredis-2.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5d7d7078f3b841ad86e35459e9f1a49db6d793b796a25fe866333166196d9fec"},
+ {file = "hiredis-2.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:835c4cbf8b38c83240b3eb9bd575cd1bfefe5ea5c46cc5ac2bf2d1f47d1fd696"},
+ {file = "hiredis-2.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:718589c48e97820bdc2a99e2621b5039884cc23199213756054d10cd309ad56c"},
+ {file = "hiredis-2.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2d96be6917ea8f753691a4674f682dd5e145b70edab28c05aa5552ae873e843"},
+ {file = "hiredis-2.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5fe1bb4b1525751f3050337097b3b2bfe445836e59a5a0984928dd0797f9abf"},
+ {file = "hiredis-2.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91dc73310b92b4aeccffdcd4a762955fe71380f5eaa4e242ee95019e41519101"},
+ {file = "hiredis-2.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bb858218de60a930a164a991fff001c70b0c3d923d3ae40fef2acf3321126b00"},
+ {file = "hiredis-2.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:53040c3b3488b52f4609775453fc759262f2885b733150ee2e1d88257fdafed8"},
+ {file = "hiredis-2.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a1c9b7d6d7bf35e1e2217b2847710154b11d25bf86b77bb7e190161f8b89917e"},
+ {file = "hiredis-2.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:dfbe939fdddbc7b90cab4124f3ddd6391099fb964f6dab3386aa8cf56f37b5ba"},
+ {file = "hiredis-2.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3a51cb4ea466276a845a940931357b4a876f903eabde514ba95e45050e1c2150"},
+ {file = "hiredis-2.1.0-cp38-cp38-win32.whl", hash = "sha256:8bce4c687136bf13df76072072b9baadbd52f7d1b143fbbda96387f50e8ebaeb"},
+ {file = "hiredis-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:1f94684b13fbbee1239303018d5ea900d786e486cdb130cde3144d53f4e262e4"},
+ {file = "hiredis-2.1.0-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:879668ffab582bdffd9f10f6c8797aac055db183f266e3aa3a6438ff0768bc29"},
+ {file = "hiredis-2.1.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f1d5a99de0fd02438f251e50ec64936d22d542c8e5d80bdec236f9713eeef334"},
+ {file = "hiredis-2.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab622bcddcf334b4b1fc4b22e163e93160e3afdd7feaedd77ac6f258e0c77b68"},
+ {file = "hiredis-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:964c4f23ff450fb8d73edf06fc7475a4e81a3f9b03a9a04a907ec81c84052fcf"},
+ {file = "hiredis-2.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9f8b8daef346ffc0268d7086c213ab24c2a3fcbd4249eacfbb3635602c79d20"},
+ {file = "hiredis-2.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e2039cdaa2e6656eae4a2e2537ed77e27f29b7487b97ce7ae6a3cb88d01b968"},
+ {file = "hiredis-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43d3168da0a81fa0a9e4bc6e14316beac8e5f1b439ca5cc5af7f9a558cfba741"},
+ {file = "hiredis-2.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0021ba034b74c5006f62e4cfdd79d04c7c720731eda256ce29d769ac6483adc3"},
+ {file = "hiredis-2.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:39a1bb45bcd698baf70ad4e9a94af164525bf053caea7df3777172d20d69538a"},
+ {file = "hiredis-2.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c1b636b05777536a83b4cced157cbdc2d0012d494a9ec2f7b7e07c54296cd773"},
+ {file = "hiredis-2.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:58a7ceb71f967fcc1878fb64666a12fbc5f243ab00d0653d3752a811941d8261"},
+ {file = "hiredis-2.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:c5263c676dc4d55202e7ca0429b949fc6ba7c0dd3a3a2b80538593ab27d82836"},
+ {file = "hiredis-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b5879d13025b04903ddf71921812db27fe1156a0952ad253014354d72463aaa9"},
+ {file = "hiredis-2.1.0-cp39-cp39-win32.whl", hash = "sha256:9259f637d77544ffeb97acb0a87fdd192a8aced7a2fbd7439160dbee8341d446"},
+ {file = "hiredis-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:fb818b6e0981e16dfdfc9e507c9842f8d210e6ecaf3edb8ac3039dbd24768839"},
+ {file = "hiredis-2.1.0-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:648d4648bf6b3dcc418a974df143b2f96627ab8b50bda23a57759c273880ecfb"},
+ {file = "hiredis-2.1.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:654949cfc0bc76a5292b6ac111113b2eafb0739e0496495368981ea2e80bf4ec"},
+ {file = "hiredis-2.1.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2a98b835c2088998a47da51b1b3661b587b2d4b3305d03fc9893888cc2aa54"},
+ {file = "hiredis-2.1.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7222bd9243387d778245619d0ac62d35cf72ee746ec0efb7b9b230ae3e0c3a39"},
+ {file = "hiredis-2.1.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:778f6de73c3abd67d447a3442f89e7d43a8de1eb5093f416af14dddc1d5c9cb5"},
+ {file = "hiredis-2.1.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c4cfb61fe642f30a22789055847004393bc65b5686988c64191e379ea4ccd069"},
+ {file = "hiredis-2.1.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03b6bef7eb50415aca87200a511d66a2fd69f1fcc75cfe1408e1201cbe28ddfb"},
+ {file = "hiredis-2.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3195e13a700f6ff35894c4920fcce8f6c2b01cdbc01f76fe567753c495849e9b"},
+ {file = "hiredis-2.1.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19f724405c808a89db422ed1010caab80a16d3e5b49632356ae7912513b6d58e"},
+ {file = "hiredis-2.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8ecebeff966b412138b0cd105d7572f8d5e65e96355af699863890f8370707e6"},
+ {file = "hiredis-2.1.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:4f34eefaf164bf43b29ccc809c168248eb95001837ed0e9e3279891f57ae2fab"},
+ {file = "hiredis-2.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11fad16beb9d623ea423c9129bab0e392ea4c84363d61c125f679be3d029442f"},
+ {file = "hiredis-2.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c763eb9a1414c4d665945c70ae2ef74a843600667b0069fe90e2aabc78e5411"},
+ {file = "hiredis-2.1.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edb7f156a8f8a1999574f27bda67dd2bff2d5b180bb6aed996a1792cafbcc668"},
+ {file = "hiredis-2.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e057d5545189d4c9e22ae0f7dc283ea0a225f56999511022c062cce7f9589d69"},
]
hyperlink = [
{file = "hyperlink-21.0.0-py2.py3-none-any.whl", hash = "sha256:e6b14c37ecb73e89c77d78cdb4c2cc8f3fb59a885c5b3f819ff4ed80f25af1b4"},
{file = "hyperlink-21.0.0.tar.gz", hash = "sha256:427af957daa58bc909471c6c40f74c5450fa123dd093fc53efd2e91d2705a56b"},
]
idna = [
- {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"},
- {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"},
+ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
+ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
]
ijson = [
{file = "ijson-3.1.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:6c1a777096be5f75ffebb335c6d2ebc0e489b231496b7f2ca903aa061fe7d381"},
@@ -1982,35 +2059,39 @@ importlib-metadata = [
{file = "importlib_metadata-4.2.0.tar.gz", hash = "sha256:b7e52a1f8dec14a75ea73e0891f3060099ca1d8e6a462a4dff11c3e119ea1b31"},
]
importlib-resources = [
- {file = "importlib_resources-5.4.0-py3-none-any.whl", hash = "sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45"},
- {file = "importlib_resources-5.4.0.tar.gz", hash = "sha256:d756e2f85dd4de2ba89be0b21dba2a3bbec2e871a42a3a16719258a11f87506b"},
+ {file = "importlib_resources-5.10.1-py3-none-any.whl", hash = "sha256:c09b067d82e72c66f4f8eb12332f5efbebc9b007c0b6c40818108c9870adc363"},
+ {file = "importlib_resources-5.10.1.tar.gz", hash = "sha256:32bb095bda29741f6ef0e5278c42df98d135391bee5f932841efc0041f748dc3"},
]
incremental = [
- {file = "incremental-21.3.0-py2.py3-none-any.whl", hash = "sha256:92014aebc6a20b78a8084cdd5645eeaa7f74b8933f70fa3ada2cfbd1e3b54321"},
- {file = "incremental-21.3.0.tar.gz", hash = "sha256:02f5de5aff48f6b9f665d99d48bfc7ec03b6e3943210de7cfc88856d755d6f57"},
+ {file = "incremental-22.10.0-py2.py3-none-any.whl", hash = "sha256:b864a1f30885ee72c5ac2835a761b8fe8aa9c28b9395cacf27286602688d3e51"},
+ {file = "incremental-22.10.0.tar.gz", hash = "sha256:912feeb5e0f7e0188e6f42241d2f450002e11bbc0937c65865045854c24c0bd0"},
]
isort = [
- {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"},
- {file = "isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"},
+ {file = "isort-5.11.2-py3-none-any.whl", hash = "sha256:e486966fba83f25b8045f8dd7455b0a0d1e4de481e1d7ce4669902d9fb85e622"},
+ {file = "isort-5.11.2.tar.gz", hash = "sha256:dd8bbc5c0990f2a095d754e50360915f73b4c26fc82733eb5bfc6b48396af4d2"},
]
jaeger-client = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
+"jaraco.classes" = [
+ {file = "jaraco.classes-3.2.3-py3-none-any.whl", hash = "sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158"},
+ {file = "jaraco.classes-3.2.3.tar.gz", hash = "sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a"},
+]
jeepney = [
- {file = "jeepney-0.7.1-py3-none-any.whl", hash = "sha256:1b5a0ea5c0e7b166b2f5895b91a08c14de8915afda4407fb5022a195224958ac"},
- {file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"},
+ {file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"},
+ {file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"},
]
-jinja2 = [
- {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"},
- {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"},
+Jinja2 = [
+ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
+ {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
]
jsonschema = [
- {file = "jsonschema-4.16.0-py3-none-any.whl", hash = "sha256:9e74b8f9738d6a946d70705dc692b74b5429cd0960d58e79ffecfc43b2221eb9"},
- {file = "jsonschema-4.16.0.tar.gz", hash = "sha256:165059f076eff6971bae5b742fc029a7b4ef3f9bcf04c14e4776a7605de14b23"},
+ {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
+ {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
]
keyring = [
- {file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"},
- {file = "keyring-23.5.0.tar.gz", hash = "sha256:9012508e141a80bd1c0b6778d5c610dd9f8c464d75ac6774248500503f972fb9"},
+ {file = "keyring-23.9.3-py3-none-any.whl", hash = "sha256:69732a15cb1433bdfbc3b980a8a36a04878a6cfd7cb99f497b573f31618001c0"},
+ {file = "keyring-23.9.3.tar.gz", hash = "sha256:69b01dd83c42f590250fe7a1f503fc229b14de83857314b1933a3ddbf595c4a5"},
]
ldap3 = [
{file = "ldap3-2.9.1-py2.6.egg", hash = "sha256:5ab7febc00689181375de40c396dcad4f2659cd260fc5e94c508b6d77c17e9d5"},
@@ -2020,118 +2101,124 @@ ldap3 = [
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
]
lxml = [
- {file = "lxml-4.9.1-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:98cafc618614d72b02185ac583c6f7796202062c41d2eeecdf07820bad3295ed"},
- {file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c62e8dd9754b7debda0c5ba59d34509c4688f853588d75b53c3791983faa96fc"},
- {file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21fb3d24ab430fc538a96e9fbb9b150029914805d551deeac7d7822f64631dfc"},
- {file = "lxml-4.9.1-cp27-cp27m-win32.whl", hash = "sha256:86e92728ef3fc842c50a5cb1d5ba2bc66db7da08a7af53fb3da79e202d1b2cd3"},
- {file = "lxml-4.9.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4cfbe42c686f33944e12f45a27d25a492cc0e43e1dc1da5d6a87cbcaf2e95627"},
- {file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dad7b164905d3e534883281c050180afcf1e230c3d4a54e8038aa5cfcf312b84"},
- {file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a614e4afed58c14254e67862456d212c4dcceebab2eaa44d627c2ca04bf86837"},
- {file = "lxml-4.9.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f9ced82717c7ec65a67667bb05865ffe38af0e835cdd78728f1209c8fffe0cad"},
- {file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d9fc0bf3ff86c17348dfc5d322f627d78273eba545db865c3cd14b3f19e57fa5"},
- {file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e5f66bdf0976ec667fc4594d2812a00b07ed14d1b44259d19a41ae3fff99f2b8"},
- {file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fe17d10b97fdf58155f858606bddb4e037b805a60ae023c009f760d8361a4eb8"},
- {file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8caf4d16b31961e964c62194ea3e26a0e9561cdf72eecb1781458b67ec83423d"},
- {file = "lxml-4.9.1-cp310-cp310-win32.whl", hash = "sha256:4780677767dd52b99f0af1f123bc2c22873d30b474aa0e2fc3fe5e02217687c7"},
- {file = "lxml-4.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a188cd292c4d2fcd78d04f863b789ef43aa129b233d7c9004de08693728b"},
- {file = "lxml-4.9.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:be9eb06489bc975c38706902cbc6888f39e946b81383abc2838d186f0e8b6a9d"},
- {file = "lxml-4.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f1be258c4d3dc609e654a1dc59d37b17d7fef05df912c01fc2e15eb43a9735f3"},
- {file = "lxml-4.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:927a9dd016d6033bc12e0bf5dee1dde140235fc8d0d51099353c76081c03dc29"},
- {file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9232b09f5efee6a495a99ae6824881940d6447debe272ea400c02e3b68aad85d"},
- {file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318"},
- {file = "lxml-4.9.1-cp35-cp35m-win32.whl", hash = "sha256:4d5bae0a37af799207140652a700f21a85946f107a199bcb06720b13a4f1f0b7"},
- {file = "lxml-4.9.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4878e667ebabe9b65e785ac8da4d48886fe81193a84bbe49f12acff8f7a383a4"},
- {file = "lxml-4.9.1-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:1355755b62c28950f9ce123c7a41460ed9743c699905cbe664a5bcc5c9c7c7fb"},
- {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:bcaa1c495ce623966d9fc8a187da80082334236a2a1c7e141763ffaf7a405067"},
- {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eafc048ea3f1b3c136c71a86db393be36b5b3d9c87b1c25204e7d397cee9536"},
- {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:13c90064b224e10c14dcdf8086688d3f0e612db53766e7478d7754703295c7c8"},
- {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206a51077773c6c5d2ce1991327cda719063a47adc02bd703c56a662cdb6c58b"},
- {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e8f0c9d65da595cfe91713bc1222af9ecabd37971762cb830dea2fc3b3bb2acf"},
- {file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8f0a4d179c9a941eb80c3a63cdb495e539e064f8054230844dcf2fcb812b71d3"},
- {file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:830c88747dce8a3e7525defa68afd742b4580df6aa2fdd6f0855481e3994d391"},
- {file = "lxml-4.9.1-cp36-cp36m-win32.whl", hash = "sha256:1e1cf47774373777936c5aabad489fef7b1c087dcd1f426b621fda9dcc12994e"},
- {file = "lxml-4.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:5974895115737a74a00b321e339b9c3f45c20275d226398ae79ac008d908bff7"},
- {file = "lxml-4.9.1-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:1423631e3d51008871299525b541413c9b6c6423593e89f9c4cfbe8460afc0a2"},
- {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:2aaf6a0a6465d39b5ca69688fce82d20088c1838534982996ec46633dc7ad6cc"},
- {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:9f36de4cd0c262dd9927886cc2305aa3f2210db437aa4fed3fb4940b8bf4592c"},
- {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae06c1e4bc60ee076292e582a7512f304abdf6c70db59b56745cca1684f875a4"},
- {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:57e4d637258703d14171b54203fd6822fda218c6c2658a7d30816b10995f29f3"},
- {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d279033bf614953c3fc4a0aa9ac33a21e8044ca72d4fa8b9273fe75359d5cca"},
- {file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a60f90bba4c37962cbf210f0188ecca87daafdf60271f4c6948606e4dabf8785"},
- {file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ca2264f341dd81e41f3fffecec6e446aa2121e0b8d026fb5130e02de1402785"},
- {file = "lxml-4.9.1-cp37-cp37m-win32.whl", hash = "sha256:27e590352c76156f50f538dbcebd1925317a0f70540f7dc8c97d2931c595783a"},
- {file = "lxml-4.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:eea5d6443b093e1545ad0210e6cf27f920482bfcf5c77cdc8596aec73523bb7e"},
- {file = "lxml-4.9.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f05251bbc2145349b8d0b77c0d4e5f3b228418807b1ee27cefb11f69ed3d233b"},
- {file = "lxml-4.9.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:487c8e61d7acc50b8be82bda8c8d21d20e133c3cbf41bd8ad7eb1aaeb3f07c97"},
- {file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d1a92d8e90b286d491e5626af53afef2ba04da33e82e30744795c71880eaa21"},
- {file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:b570da8cd0012f4af9fa76a5635cd31f707473e65a5a335b186069d5c7121ff2"},
- {file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ef87fca280fb15342726bd5f980f6faf8b84a5287fcc2d4962ea8af88b35130"},
- {file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:93e414e3206779ef41e5ff2448067213febf260ba747fc65389a3ddaa3fb8715"},
- {file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6653071f4f9bac46fbc30f3c7838b0e9063ee335908c5d61fb7a4a86c8fd2036"},
- {file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:32a73c53783becdb7eaf75a2a1525ea8e49379fb7248c3eeefb9412123536387"},
- {file = "lxml-4.9.1-cp38-cp38-win32.whl", hash = "sha256:1a7c59c6ffd6ef5db362b798f350e24ab2cfa5700d53ac6681918f314a4d3b94"},
- {file = "lxml-4.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:1436cf0063bba7888e43f1ba8d58824f085410ea2025befe81150aceb123e345"},
- {file = "lxml-4.9.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:4beea0f31491bc086991b97517b9683e5cfb369205dac0148ef685ac12a20a67"},
- {file = "lxml-4.9.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:41fb58868b816c202e8881fd0f179a4644ce6e7cbbb248ef0283a34b73ec73bb"},
- {file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bd34f6d1810d9354dc7e35158aa6cc33456be7706df4420819af6ed966e85448"},
- {file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:edffbe3c510d8f4bf8640e02ca019e48a9b72357318383ca60e3330c23aaffc7"},
- {file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d949f53ad4fc7cf02c44d6678e7ff05ec5f5552b235b9e136bd52e9bf730b91"},
- {file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:079b68f197c796e42aa80b1f739f058dcee796dc725cc9a1be0cdb08fc45b000"},
- {file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9c3a88d20e4fe4a2a4a84bf439a5ac9c9aba400b85244c63a1ab7088f85d9d25"},
- {file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4e285b5f2bf321fc0857b491b5028c5f276ec0c873b985d58d7748ece1d770dd"},
- {file = "lxml-4.9.1-cp39-cp39-win32.whl", hash = "sha256:ef72013e20dd5ba86a8ae1aed7f56f31d3374189aa8b433e7b12ad182c0d2dfb"},
- {file = "lxml-4.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:10d2017f9150248563bb579cd0d07c61c58da85c922b780060dcc9a3aa9f432d"},
- {file = "lxml-4.9.1-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c"},
- {file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0645e934e940107e2fdbe7c5b6fb8ec6232444260752598bc4d09511bd056c0b"},
- {file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6daa662aba22ef3258934105be2dd9afa5bb45748f4f702a3b39a5bf53a1f4dc"},
- {file = "lxml-4.9.1-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:603a464c2e67d8a546ddaa206d98e3246e5db05594b97db844c2f0a1af37cf5b"},
- {file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c4b2e0559b68455c085fb0f6178e9752c4be3bba104d6e881eb5573b399d1eb2"},
- {file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0f3f0059891d3254c7b5fb935330d6db38d6519ecd238ca4fce93c234b4a0f73"},
- {file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c852b1530083a620cb0de5f3cd6826f19862bafeaf77586f1aef326e49d95f0c"},
- {file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:287605bede6bd36e930577c5925fcea17cb30453d96a7b4c63c14a257118dbb9"},
- {file = "lxml-4.9.1.tar.gz", hash = "sha256:fe749b052bb7233fe5d072fcb549221a8cb1a16725c47c37e42b0b9cb3ff2c3f"},
-]
-markupsafe = [
- {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e104c0c2b4cd765b4e83909cde7ec61a1e313f8a75775897db321450e928cce"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24c3be29abb6b34052fd26fc7a8e0a49b1ee9d282e3665e8ad09a0a68faee5b3"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204730fd5fe2fe3b1e9ccadb2bd18ba8712b111dcabce185af0b3b5285a7c989"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d3b64c65328cb4cd252c94f83e66e3d7acf8891e60ebf588d7b493a55a1dbf26"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:96de1932237abe0a13ba68b63e94113678c379dca45afa040a17b6e1ad7ed076"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75bb36f134883fdbe13d8e63b8675f5f12b80bb6627f7714c7d6c5becf22719f"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-win32.whl", hash = "sha256:4056f752015dfa9828dce3140dbadd543b555afb3252507348c493def166d454"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:d4e702eea4a2903441f2735799d217f4ac1b55f7d8ad96ab7d4e25417cb0827c"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f0eddfcabd6936558ec020130f932d479930581171368fd728efcfb6ef0dd357"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ddea4c352a488b5e1069069f2f501006b1a4362cb906bee9a193ef1245a7a61"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09c86c9643cceb1d87ca08cdc30160d1b7ab49a8a21564868921959bd16441b8"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a0abef2ca47b33fb615b491ce31b055ef2430de52c5b3fb19a4042dbc5cadb"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:736895a020e31b428b3382a7887bfea96102c529530299f426bf2e636aacec9e"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:679cbb78914ab212c49c67ba2c7396dc599a8479de51b9a87b174700abd9ea49"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:84ad5e29bf8bab3ad70fd707d3c05524862bddc54dc040982b0dbcff36481de7"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-win32.whl", hash = "sha256:8da5924cb1f9064589767b0f3fc39d03e3d0fb5aa29e0cb21d43106519bd624a"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:454ffc1cbb75227d15667c09f164a0099159da0c1f3d2636aa648f12675491ad"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:142119fb14a1ef6d758912b25c4e803c3ff66920635c44078666fe7cc3f8f759"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2a5a856019d2833c56a3dcac1b80fe795c95f401818ea963594b345929dffa7"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d1fb9b2eec3c9714dd936860850300b51dbaa37404209c8d4cb66547884b7ed"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62c0285e91414f5c8f621a17b69fc0088394ccdaa961ef469e833dbff64bd5ea"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc3150f85e2dbcf99e65238c842d1cfe69d3e7649b19864c1cc043213d9cd730"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f02cf7221d5cd915d7fa58ab64f7ee6dd0f6cddbb48683debf5d04ae9b1c2cc1"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5653619b3eb5cbd35bfba3c12d575db2a74d15e0e1c08bf1db788069d410ce8"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d2f5d97fcbd004c03df8d8fe2b973fe2b14e7bfeb2cfa012eaa8759ce9a762f"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-win32.whl", hash = "sha256:3cace1837bc84e63b3fd2dfce37f08f8c18aeb81ef5cf6bb9b51f625cb4e6cd8"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:fabbe18087c3d33c5824cb145ffca52eccd053061df1d79d4b66dafa5ad2a5ea"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:023af8c54fe63530545f70dd2a2a7eed18d07a9a77b94e8bf1e2ff7f252db9a3"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d66624f04de4af8bbf1c7f21cc06649c1c69a7f84109179add573ce35e46d448"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c532d5ab79be0199fa2658e24a02fce8542df196e60665dd322409a03db6a52c"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ec74fada3841b8c5f4c4f197bea916025cb9aa3fe5abf7d52b655d042f956"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c653fde75a6e5eb814d2a0a89378f83d1d3f502ab710904ee585c38888816c"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:961eb86e5be7d0973789f30ebcf6caab60b844203f4396ece27310295a6082c7"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:598b65d74615c021423bd45c2bc5e9b59539c875a9bdb7e5f2a6b92dfcfc268d"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:599941da468f2cf22bf90a84f6e2a65524e87be2fce844f96f2dd9a6c9d1e635"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-win32.whl", hash = "sha256:e6f7f3f41faffaea6596da86ecc2389672fa949bd035251eab26dc6697451d05"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:b8811d48078d1cf2a6863dafb896e68406c5f513048451cd2ded0473133473c7"},
- {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"},
+ {file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"},
+ {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"},
+ {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"},
+ {file = "lxml-4.9.2-cp27-cp27m-win32.whl", hash = "sha256:8d0b4612b66ff5d62d03bcaa043bb018f74dfea51184e53f067e6fdcba4bd8de"},
+ {file = "lxml-4.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:4c8f293f14abc8fd3e8e01c5bd86e6ed0b6ef71936ded5bf10fe7a5efefbaca3"},
+ {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2899456259589aa38bfb018c364d6ae7b53c5c22d8e27d0ec7609c2a1ff78b50"},
+ {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6749649eecd6a9871cae297bffa4ee76f90b4504a2a2ab528d9ebe912b101975"},
+ {file = "lxml-4.9.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a08cff61517ee26cb56f1e949cca38caabe9ea9fbb4b1e10a805dc39844b7d5c"},
+ {file = "lxml-4.9.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:85cabf64adec449132e55616e7ca3e1000ab449d1d0f9d7f83146ed5bdcb6d8a"},
+ {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8340225bd5e7a701c0fa98284c849c9b9fc9238abf53a0ebd90900f25d39a4e4"},
+ {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:1ab8f1f932e8f82355e75dda5413a57612c6ea448069d4fb2e217e9a4bed13d4"},
+ {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:699a9af7dffaf67deeae27b2112aa06b41c370d5e7633e0ee0aea2e0b6c211f7"},
+ {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9cc34af337a97d470040f99ba4282f6e6bac88407d021688a5d585e44a23184"},
+ {file = "lxml-4.9.2-cp310-cp310-win32.whl", hash = "sha256:d02a5399126a53492415d4906ab0ad0375a5456cc05c3fc0fc4ca11771745cda"},
+ {file = "lxml-4.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:a38486985ca49cfa574a507e7a2215c0c780fd1778bb6290c21193b7211702ab"},
+ {file = "lxml-4.9.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:6943826a0374fb135bb11843594eda9ae150fba9d1d027d2464c713da7c09afe"},
+ {file = "lxml-4.9.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c83203addf554215463b59f6399835201999b5e48019dc17f182ed5ad87205c9"},
+ {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2a87fa548561d2f4643c99cd13131acb607ddabb70682dcf1dff5f71f781a4bf"},
+ {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:d6b430a9938a5a5d85fc107d852262ddcd48602c120e3dbb02137c83d212b380"},
+ {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3efea981d956a6f7173b4659849f55081867cf897e719f57383698af6f618a92"},
+ {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df0623dcf9668ad0445e0558a21211d4e9a149ea8f5666917c8eeec515f0a6d1"},
+ {file = "lxml-4.9.2-cp311-cp311-win32.whl", hash = "sha256:da248f93f0418a9e9d94b0080d7ebc407a9a5e6d0b57bb30db9b5cc28de1ad33"},
+ {file = "lxml-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:3818b8e2c4b5148567e1b09ce739006acfaa44ce3156f8cbbc11062994b8e8dd"},
+ {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"},
+ {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"},
+ {file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"},
+ {file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"},
+ {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"},
+ {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"},
+ {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f49e52d174375a7def9915c9f06ec4e569d235ad428f70751765f48d5926678c"},
+ {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36c3c175d34652a35475a73762b545f4527aec044910a651d2bf50de9c3352b1"},
+ {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a35f8b7fa99f90dd2f5dc5a9fa12332642f087a7641289ca6c40d6e1a2637d8e"},
+ {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"},
+ {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"},
+ {file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"},
+ {file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"},
+ {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"},
+ {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"},
+ {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c9ec3eaf616d67db0764b3bb983962b4f385a1f08304fd30c7283954e6a7869b"},
+ {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a29ba94d065945944016b6b74e538bdb1751a1db6ffb80c9d3c2e40d6fa9894"},
+ {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a82d05da00a58b8e4c0008edbc8a4b6ec5a4bc1e2ee0fb6ed157cf634ed7fa45"},
+ {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:223f4232855ade399bd409331e6ca70fb5578efef22cf4069a6090acc0f53c0e"},
+ {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d17bc7c2ccf49c478c5bdd447594e82692c74222698cfc9b5daae7ae7e90743b"},
+ {file = "lxml-4.9.2-cp37-cp37m-win32.whl", hash = "sha256:b64d891da92e232c36976c80ed7ebb383e3f148489796d8d31a5b6a677825efe"},
+ {file = "lxml-4.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a0a336d6d3e8b234a3aae3c674873d8f0e720b76bc1d9416866c41cd9500ffb9"},
+ {file = "lxml-4.9.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:da4dd7c9c50c059aba52b3524f84d7de956f7fef88f0bafcf4ad7dde94a064e8"},
+ {file = "lxml-4.9.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:821b7f59b99551c69c85a6039c65b75f5683bdc63270fec660f75da67469ca24"},
+ {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e5168986b90a8d1f2f9dc1b841467c74221bd752537b99761a93d2d981e04889"},
+ {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e20cb5a47247e383cf4ff523205060991021233ebd6f924bca927fcf25cf86f"},
+ {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13598ecfbd2e86ea7ae45ec28a2a54fb87ee9b9fdb0f6d343297d8e548392c03"},
+ {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:880bbbcbe2fca64e2f4d8e04db47bcdf504936fa2b33933efd945e1b429bea8c"},
+ {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7d2278d59425777cfcb19735018d897ca8303abe67cc735f9f97177ceff8027f"},
+ {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5344a43228767f53a9df6e5b253f8cdca7dfc7b7aeae52551958192f56d98457"},
+ {file = "lxml-4.9.2-cp38-cp38-win32.whl", hash = "sha256:925073b2fe14ab9b87e73f9a5fde6ce6392da430f3004d8b72cc86f746f5163b"},
+ {file = "lxml-4.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:9b22c5c66f67ae00c0199f6055705bc3eb3fcb08d03d2ec4059a2b1b25ed48d7"},
+ {file = "lxml-4.9.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5f50a1c177e2fa3ee0667a5ab79fdc6b23086bc8b589d90b93b4bd17eb0e64d1"},
+ {file = "lxml-4.9.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:090c6543d3696cbe15b4ac6e175e576bcc3f1ccfbba970061b7300b0c15a2140"},
+ {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:63da2ccc0857c311d764e7d3d90f429c252e83b52d1f8f1d1fe55be26827d1f4"},
+ {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5b4545b8a40478183ac06c073e81a5ce4cf01bf1734962577cf2bb569a5b3bbf"},
+ {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2e430cd2824f05f2d4f687701144556646bae8f249fd60aa1e4c768ba7018947"},
+ {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6804daeb7ef69e7b36f76caddb85cccd63d0c56dedb47555d2fc969e2af6a1a5"},
+ {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a6e441a86553c310258aca15d1c05903aaf4965b23f3bc2d55f200804e005ee5"},
+ {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca34efc80a29351897e18888c71c6aca4a359247c87e0b1c7ada14f0ab0c0fb2"},
+ {file = "lxml-4.9.2-cp39-cp39-win32.whl", hash = "sha256:6b418afe5df18233fc6b6093deb82a32895b6bb0b1155c2cdb05203f583053f1"},
+ {file = "lxml-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f1496ea22ca2c830cbcbd473de8f114a320da308438ae65abad6bab7867fe38f"},
+ {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b264171e3143d842ded311b7dccd46ff9ef34247129ff5bf5066123c55c2431c"},
+ {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0dc313ef231edf866912e9d8f5a042ddab56c752619e92dfd3a2c277e6a7299a"},
+ {file = "lxml-4.9.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:16efd54337136e8cd72fb9485c368d91d77a47ee2d42b057564aae201257d419"},
+ {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0f2b1e0d79180f344ff9f321327b005ca043a50ece8713de61d1cb383fb8ac05"},
+ {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:7b770ed79542ed52c519119473898198761d78beb24b107acf3ad65deae61f1f"},
+ {file = "lxml-4.9.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efa29c2fe6b4fdd32e8ef81c1528506895eca86e1d8c4657fda04c9b3786ddf9"},
+ {file = "lxml-4.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e91ee82f4199af8c43d8158024cbdff3d931df350252288f0d4ce656df7f3b5"},
+ {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b23e19989c355ca854276178a0463951a653309fb8e57ce674497f2d9f208746"},
+ {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:01d36c05f4afb8f7c20fd9ed5badca32a2029b93b1750f571ccc0b142531caf7"},
+ {file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"},
+ {file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"},
+]
+MarkupSafe = [
+ {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-win32.whl", hash = "sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6"},
+ {file = "MarkupSafe-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-win32.whl", hash = "sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff"},
+ {file = "MarkupSafe-2.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-win32.whl", hash = "sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1"},
+ {file = "MarkupSafe-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-win32.whl", hash = "sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c"},
+ {file = "MarkupSafe-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247"},
+ {file = "MarkupSafe-2.1.1.tar.gz", hash = "sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b"},
]
matrix-common = [
{file = "matrix_common-1.3.0-py3-none-any.whl", hash = "sha256:524e2785b9b03be4d15f3a8a6b857c5b6af68791ffb1b9918f0ad299abc4db20"},
@@ -2142,8 +2229,12 @@ matrix-synapse-ldap3 = [
{file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"},
]
mccabe = [
- {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
- {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
+ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
+]
+more-itertools = [
+ {file = "more-itertools-9.0.0.tar.gz", hash = "sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab"},
+ {file = "more_itertools-9.0.0-py3-none-any.whl", hash = "sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41"},
]
msgpack = [
{file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"},
@@ -2241,86 +2332,114 @@ opentracing = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
packaging = [
- {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
- {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
+ {file = "packaging-22.0-py3-none-any.whl", hash = "sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3"},
+ {file = "packaging-22.0.tar.gz", hash = "sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3"},
]
parameterized = [
{file = "parameterized-0.8.1-py2.py3-none-any.whl", hash = "sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9"},
{file = "parameterized-0.8.1.tar.gz", hash = "sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c"},
]
pathspec = [
- {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"},
- {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
+ {file = "pathspec-0.10.3-py3-none-any.whl", hash = "sha256:3c95343af8b756205e2aba76e843ba9520a24dd84f68c22b9f93251507509dd6"},
+ {file = "pathspec-0.10.3.tar.gz", hash = "sha256:56200de4077d9d0791465aa9095a01d421861e405b5096955051deefd697d6f6"},
]
phonenumbers = [
- {file = "phonenumbers-8.12.44-py2.py3-none-any.whl", hash = "sha256:cc1299cf37b309ecab6214297663ab86cb3d64ae37fd5b88e904fe7983a874a6"},
- {file = "phonenumbers-8.12.44.tar.gz", hash = "sha256:26cfd0257d1704fe2f88caff2caabb70d16a877b1e65b6aae51f9fbbe10aa8ce"},
-]
-pillow = [
- {file = "Pillow-9.0.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4"},
- {file = "Pillow-9.0.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976"},
- {file = "Pillow-9.0.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc"},
- {file = "Pillow-9.0.1-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd"},
- {file = "Pillow-9.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049"},
- {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a"},
- {file = "Pillow-9.0.1-cp310-cp310-win32.whl", hash = "sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e"},
- {file = "Pillow-9.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b"},
- {file = "Pillow-9.0.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b"},
- {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030"},
- {file = "Pillow-9.0.1-cp37-cp37m-win32.whl", hash = "sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669"},
- {file = "Pillow-9.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092"},
- {file = "Pillow-9.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204"},
- {file = "Pillow-9.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5"},
- {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae"},
- {file = "Pillow-9.0.1-cp38-cp38-win32.whl", hash = "sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c"},
- {file = "Pillow-9.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00"},
- {file = "Pillow-9.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838"},
- {file = "Pillow-9.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b"},
- {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7"},
- {file = "Pillow-9.0.1-cp39-cp39-win32.whl", hash = "sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7"},
- {file = "Pillow-9.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56"},
- {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e"},
- {file = "Pillow-9.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70"},
- {file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"},
+ {file = "phonenumbers-8.13.2-py2.py3-none-any.whl", hash = "sha256:884b26f775205261f4dc861371dce217c1661a4942fb3ec3624e290fb51869bf"},
+ {file = "phonenumbers-8.13.2.tar.gz", hash = "sha256:0179f688d48c0e7e161eb7b9d86d587940af1f5174f97c1fdfd893c599c0d94a"},
+]
+Pillow = [
+ {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
+ {file = "Pillow-9.3.0-1-cp37-cp37m-win_amd64.whl", hash = "sha256:32a44128c4bdca7f31de5be641187367fe2a450ad83b833ef78910397db491aa"},
+ {file = "Pillow-9.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:0b7257127d646ff8676ec8a15520013a698d1fdc48bc2a79ba4e53df792526f2"},
+ {file = "Pillow-9.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b90f7616ea170e92820775ed47e136208e04c967271c9ef615b6fbd08d9af0e3"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68943d632f1f9e3dce98908e873b3a090f6cba1cbb1b892a9e8d97c938871fbe"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be55f8457cd1eac957af0c3f5ece7bc3f033f89b114ef30f710882717670b2a8"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d77adcd56a42d00cc1be30843d3426aa4e660cab4a61021dc84467123f7a00c"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:829f97c8e258593b9daa80638aee3789b7df9da5cf1336035016d76f03b8860c"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:801ec82e4188e935c7f5e22e006d01611d6b41661bba9fe45b60e7ac1a8f84de"},
+ {file = "Pillow-9.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:871b72c3643e516db4ecf20efe735deb27fe30ca17800e661d769faab45a18d7"},
+ {file = "Pillow-9.3.0-cp310-cp310-win32.whl", hash = "sha256:655a83b0058ba47c7c52e4e2df5ecf484c1b0b0349805896dd350cbc416bdd91"},
+ {file = "Pillow-9.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:9f47eabcd2ded7698106b05c2c338672d16a6f2a485e74481f524e2a23c2794b"},
+ {file = "Pillow-9.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:57751894f6618fd4308ed8e0c36c333e2f5469744c34729a27532b3db106ee20"},
+ {file = "Pillow-9.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7db8b751ad307d7cf238f02101e8e36a128a6cb199326e867d1398067381bff4"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3033fbe1feb1b59394615a1cafaee85e49d01b51d54de0cbf6aa8e64182518a1"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22b012ea2d065fd163ca096f4e37e47cd8b59cf4b0fd47bfca6abb93df70b34c"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a65733d103311331875c1dca05cb4606997fd33d6acfed695b1232ba1df193"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:502526a2cbfa431d9fc2a079bdd9061a2397b842bb6bc4239bb176da00993812"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90fb88843d3902fe7c9586d439d1e8c05258f41da473952aa8b328d8b907498c"},
+ {file = "Pillow-9.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:89dca0ce00a2b49024df6325925555d406b14aa3efc2f752dbb5940c52c56b11"},
+ {file = "Pillow-9.3.0-cp311-cp311-win32.whl", hash = "sha256:3168434d303babf495d4ba58fc22d6604f6e2afb97adc6a423e917dab828939c"},
+ {file = "Pillow-9.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:18498994b29e1cf86d505edcb7edbe814d133d2232d256db8c7a8ceb34d18cef"},
+ {file = "Pillow-9.3.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:772a91fc0e03eaf922c63badeca75e91baa80fe2f5f87bdaed4280662aad25c9"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa4107d1b306cdf8953edde0534562607fe8811b6c4d9a486298ad31de733b2"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4012d06c846dc2b80651b120e2cdd787b013deb39c09f407727ba90015c684f"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77ec3e7be99629898c9a6d24a09de089fa5356ee408cdffffe62d67bb75fdd72"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:6c738585d7a9961d8c2821a1eb3dcb978d14e238be3d70f0a706f7fa9316946b"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:828989c45c245518065a110434246c44a56a8b2b2f6347d1409c787e6e4651ee"},
+ {file = "Pillow-9.3.0-cp37-cp37m-win32.whl", hash = "sha256:82409ffe29d70fd733ff3c1025a602abb3e67405d41b9403b00b01debc4c9a29"},
+ {file = "Pillow-9.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:41e0051336807468be450d52b8edd12ac60bebaa97fe10c8b660f116e50b30e4"},
+ {file = "Pillow-9.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b03ae6f1a1878233ac620c98f3459f79fd77c7e3c2b20d460284e1fb370557d4"},
+ {file = "Pillow-9.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4390e9ce199fc1951fcfa65795f239a8a4944117b5935a9317fb320e7767b40f"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40e1ce476a7804b0fb74bcfa80b0a2206ea6a882938eaba917f7a0f004b42502"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a06a052c5f37b4ed81c613a455a81f9a3a69429b4fd7bb913c3fa98abefc20"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:15c42fb9dea42465dfd902fb0ecf584b8848ceb28b41ee2b58f866411be33f07"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:51e0e543a33ed92db9f5ef69a0356e0b1a7a6b6a71b80df99f1d181ae5875636"},
+ {file = "Pillow-9.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3dd6caf940756101205dffc5367babf288a30043d35f80936f9bfb37f8355b32"},
+ {file = "Pillow-9.3.0-cp38-cp38-win32.whl", hash = "sha256:f1ff2ee69f10f13a9596480335f406dd1f70c3650349e2be67ca3139280cade0"},
+ {file = "Pillow-9.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:276a5ca930c913f714e372b2591a22c4bd3b81a418c0f6635ba832daec1cbcfc"},
+ {file = "Pillow-9.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:73bd195e43f3fadecfc50c682f5055ec32ee2c933243cafbfdec69ab1aa87cad"},
+ {file = "Pillow-9.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c7c8ae3864846fc95f4611c78129301e203aaa2af813b703c55d10cc1628535"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0918e03aa0c72ea56edbb00d4d664294815aa11291a11504a377ea018330d3"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0915e734b33a474d76c28e07292f196cdf2a590a0d25bcc06e64e545f2d146c"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0372acb5d3598f36ec0914deed2a63f6bcdb7b606da04dc19a88d31bf0c05b"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ad58d27a5b0262c0c19b47d54c5802db9b34d38bbf886665b626aff83c74bacd"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:97aabc5c50312afa5e0a2b07c17d4ac5e865b250986f8afe2b02d772567a380c"},
+ {file = "Pillow-9.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9aaa107275d8527e9d6e7670b64aabaaa36e5b6bd71a1015ddd21da0d4e06448"},
+ {file = "Pillow-9.3.0-cp39-cp39-win32.whl", hash = "sha256:bac18ab8d2d1e6b4ce25e3424f709aceef668347db8637c2296bcf41acb7cf48"},
+ {file = "Pillow-9.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b472b5ea442148d1c3e2209f20f1e0bb0eb556538690fa70b5e1f79fa0ba8dc2"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ab388aaa3f6ce52ac1cb8e122c4bd46657c15905904b3120a6248b5b8b0bc228"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbb8e7f2abee51cef77673be97760abff1674ed32847ce04b4af90f610144c7b"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca31dd6014cb8b0b2db1e46081b0ca7d936f856da3b39744aef499db5d84d02"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c7025dce65566eb6e89f56c9509d4f628fddcedb131d9465cacd3d8bac337e7e"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ebf2029c1f464c59b8bdbe5143c79fa2045a581ac53679733d3a91d400ff9efb"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b59430236b8e58840a0dfb4099a0e8717ffb779c952426a69ae435ca1f57210c"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12ce4932caf2ddf3e41d17fc9c02d67126935a44b86df6a206cf0d7161548627"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae5331c23ce118c53b172fa64a4c037eb83c9165aba3a7ba9ddd3ec9fa64a699"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0b07fffc13f474264c336298d1b4ce01d9c5a011415b79d4ee5527bb69ae6f65"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8"},
+ {file = "Pillow-9.3.0.tar.gz", hash = "sha256:c935a22a557a560108d780f9a0fc426dd7459940dc54faa49d83249c8d3e760f"},
]
pkginfo = [
- {file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"},
- {file = "pkginfo-1.8.2.tar.gz", hash = "sha256:542e0d0b6750e2e21c20179803e40ab50598d8066d51097a0e382cba9eb02bff"},
+ {file = "pkginfo-1.9.2-py3-none-any.whl", hash = "sha256:d580059503f2f4549ad6e4c106d7437356dbd430e2c7df99ee1efe03d75f691e"},
+ {file = "pkginfo-1.9.2.tar.gz", hash = "sha256:ac03e37e4d601aaee40f8087f63fc4a2a6c9814dda2c8fa6aab1b1829653bdfa"},
]
pkgutil_resolve_name = [
{file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
{file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
]
platformdirs = [
- {file = "platformdirs-2.5.1-py3-none-any.whl", hash = "sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"},
- {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"},
+ {file = "platformdirs-2.6.0-py3-none-any.whl", hash = "sha256:1a89a12377800c81983db6be069ec068eee989748799b946cce2a6e80dcc54ca"},
+ {file = "platformdirs-2.6.0.tar.gz", hash = "sha256:b46ffafa316e6b83b47489d240ce17173f123a9b9c83282141c3daf26ad9ac2e"},
]
prometheus-client = [
- {file = "prometheus_client-0.14.0-py3-none-any.whl", hash = "sha256:f4aba3fdd1735852049f537c1f0ab177159b7ab76f271ecc4d2f45aa2a1d01f2"},
- {file = "prometheus_client-0.14.0.tar.gz", hash = "sha256:8f7a922dd5455ad524b6ba212ce8eb2b4b05e073f4ec7218287f88b1cac34750"},
+ {file = "prometheus_client-0.15.0-py3-none-any.whl", hash = "sha256:db7c05cbd13a0f79975592d112320f2605a325969b270a94b71dcabc47b931d2"},
+ {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"},
]
psycopg2 = [
- {file = "psycopg2-2.9.3-cp310-cp310-win32.whl", hash = "sha256:083707a696e5e1c330af2508d8fab36f9700b26621ccbcb538abe22e15485362"},
- {file = "psycopg2-2.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:d3ca6421b942f60c008f81a3541e8faf6865a28d5a9b48544b0ee4f40cac7fca"},
- {file = "psycopg2-2.9.3-cp36-cp36m-win32.whl", hash = "sha256:9572e08b50aed176ef6d66f15a21d823bb6f6d23152d35e8451d7d2d18fdac56"},
- {file = "psycopg2-2.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:a81e3866f99382dfe8c15a151f1ca5fde5815fde879348fe5a9884a7c092a305"},
- {file = "psycopg2-2.9.3-cp37-cp37m-win32.whl", hash = "sha256:cb10d44e6694d763fa1078a26f7f6137d69f555a78ec85dc2ef716c37447e4b2"},
- {file = "psycopg2-2.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:4295093a6ae3434d33ec6baab4ca5512a5082cc43c0505293087b8a46d108461"},
- {file = "psycopg2-2.9.3-cp38-cp38-win32.whl", hash = "sha256:34b33e0162cfcaad151f249c2649fd1030010c16f4bbc40a604c1cb77173dcf7"},
- {file = "psycopg2-2.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:0762c27d018edbcb2d34d51596e4346c983bd27c330218c56c4dc25ef7e819bf"},
- {file = "psycopg2-2.9.3-cp39-cp39-win32.whl", hash = "sha256:8cf3878353cc04b053822896bc4922b194792df9df2f1ad8da01fb3043602126"},
- {file = "psycopg2-2.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:06f32425949bd5fe8f625c49f17ebb9784e1e4fe928b7cce72edc36fb68e4c0c"},
- {file = "psycopg2-2.9.3.tar.gz", hash = "sha256:8e841d1bf3434da985cc5ef13e6f75c8981ced601fd70cc6bf33351b91562981"},
+ {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"},
+ {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"},
+ {file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"},
+ {file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"},
+ {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"},
+ {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"},
+ {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"},
+ {file = "psycopg2-2.9.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1e5a38aa85bd660c53947bd28aeaafb6a97d70423606f1ccb044a03a1203fe4a"},
+ {file = "psycopg2-2.9.5-cp38-cp38-win32.whl", hash = "sha256:f5b6320dbc3cf6cfb9f25308286f9f7ab464e65cfb105b64cc9c52831748ced2"},
+ {file = "psycopg2-2.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:1a5c7d7d577e0eabfcf15eb87d1e19314c8c4f0e722a301f98e0e3a65e238b4e"},
+ {file = "psycopg2-2.9.5-cp39-cp39-win32.whl", hash = "sha256:322fd5fca0b1113677089d4ebd5222c964b1760e361f151cbb2706c4912112c5"},
+ {file = "psycopg2-2.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:190d51e8c1b25a47484e52a79638a8182451d6f6dff99f26ad9bd81e5359a0fa"},
+ {file = "psycopg2-2.9.5.tar.gz", hash = "sha256:a5246d2e683a972e2187a8714b5c2cf8156c064629f9a9b1a873c1730d9e245a"},
]
psycopg2cffi = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
@@ -2359,75 +2478,79 @@ pyasn1-modules = [
{file = "pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"},
]
pycodestyle = [
- {file = "pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"},
- {file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"},
+ {file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = "sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"},
+ {file = "pycodestyle-2.9.1.tar.gz", hash = "sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785"},
]
pycparser = [
{file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
{file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
]
pydantic = [
- {file = "pydantic-1.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8098a724c2784bf03e8070993f6d46aa2eeca031f8d8a048dff277703e6e193"},
- {file = "pydantic-1.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c320c64dd876e45254bdd350f0179da737463eea41c43bacbee9d8c9d1021f11"},
- {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18f3e912f9ad1bdec27fb06b8198a2ccc32f201e24174cec1b3424dda605a310"},
- {file = "pydantic-1.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11951b404e08b01b151222a1cb1a9f0a860a8153ce8334149ab9199cd198131"},
- {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8bc541a405423ce0e51c19f637050acdbdf8feca34150e0d17f675e72d119580"},
- {file = "pydantic-1.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e565a785233c2d03724c4dc55464559639b1ba9ecf091288dd47ad9c629433bd"},
- {file = "pydantic-1.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a4a88dcd6ff8fd47c18b3a3709a89adb39a6373f4482e04c1b765045c7e282fd"},
- {file = "pydantic-1.9.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:447d5521575f18e18240906beadc58551e97ec98142266e521c34968c76c8761"},
- {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:985ceb5d0a86fcaa61e45781e567a59baa0da292d5ed2e490d612d0de5796918"},
- {file = "pydantic-1.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:059b6c1795170809103a1538255883e1983e5b831faea6558ef873d4955b4a74"},
- {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d12f96b5b64bec3f43c8e82b4aab7599d0157f11c798c9f9c528a72b9e0b339a"},
- {file = "pydantic-1.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ae72f8098acb368d877b210ebe02ba12585e77bd0db78ac04a1ee9b9f5dd2166"},
- {file = "pydantic-1.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:79b485767c13788ee314669008d01f9ef3bc05db9ea3298f6a50d3ef596a154b"},
- {file = "pydantic-1.9.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:494f7c8537f0c02b740c229af4cb47c0d39840b829ecdcfc93d91dcbb0779892"},
- {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0f047e11febe5c3198ed346b507e1d010330d56ad615a7e0a89fae604065a0e"},
- {file = "pydantic-1.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:969dd06110cb780da01336b281f53e2e7eb3a482831df441fb65dd30403f4608"},
- {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:177071dfc0df6248fd22b43036f936cfe2508077a72af0933d0c1fa269b18537"},
- {file = "pydantic-1.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9bcf8b6e011be08fb729d110f3e22e654a50f8a826b0575c7196616780683380"},
- {file = "pydantic-1.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a955260d47f03df08acf45689bd163ed9df82c0e0124beb4251b1290fa7ae728"},
- {file = "pydantic-1.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9ce157d979f742a915b75f792dbd6aa63b8eccaf46a1005ba03aa8a986bde34a"},
- {file = "pydantic-1.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0bf07cab5b279859c253d26a9194a8906e6f4a210063b84b433cf90a569de0c1"},
- {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d93d4e95eacd313d2c765ebe40d49ca9dd2ed90e5b37d0d421c597af830c195"},
- {file = "pydantic-1.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1542636a39c4892c4f4fa6270696902acb186a9aaeac6f6cf92ce6ae2e88564b"},
- {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a9af62e9b5b9bc67b2a195ebc2c2662fdf498a822d62f902bf27cccb52dbbf49"},
- {file = "pydantic-1.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fe4670cb32ea98ffbf5a1262f14c3e102cccd92b1869df3bb09538158ba90fe6"},
- {file = "pydantic-1.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:9f659a5ee95c8baa2436d392267988fd0f43eb774e5eb8739252e5a7e9cf07e0"},
- {file = "pydantic-1.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b83ba3825bc91dfa989d4eed76865e71aea3a6ca1388b59fc801ee04c4d8d0d6"},
- {file = "pydantic-1.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1dd8fecbad028cd89d04a46688d2fcc14423e8a196d5b0a5c65105664901f810"},
- {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02eefd7087268b711a3ff4db528e9916ac9aa18616da7bca69c1871d0b7a091f"},
- {file = "pydantic-1.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7eb57ba90929bac0b6cc2af2373893d80ac559adda6933e562dcfb375029acee"},
- {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4ce9ae9e91f46c344bec3b03d6ee9612802682c1551aaf627ad24045ce090761"},
- {file = "pydantic-1.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:72ccb318bf0c9ab97fc04c10c37683d9eea952ed526707fabf9ac5ae59b701fd"},
- {file = "pydantic-1.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:61b6760b08b7c395975d893e0b814a11cf011ebb24f7d869e7118f5a339a82e1"},
- {file = "pydantic-1.9.1-py3-none-any.whl", hash = "sha256:4988c0f13c42bfa9ddd2fe2f569c9d54646ce84adc5de84228cfe83396f3bd58"},
- {file = "pydantic-1.9.1.tar.gz", hash = "sha256:1ed987c3ff29fff7fd8c3ea3a3ea877ad310aae2ef9889a119e22d3f2db0691a"},
+ {file = "pydantic-1.10.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bb6ad4489af1bac6955d38ebcb95079a836af31e4c4f74aba1ca05bb9f6027bd"},
+ {file = "pydantic-1.10.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a1f5a63a6dfe19d719b1b6e6106561869d2efaca6167f84f5ab9347887d78b98"},
+ {file = "pydantic-1.10.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:352aedb1d71b8b0736c6d56ad2bd34c6982720644b0624462059ab29bd6e5912"},
+ {file = "pydantic-1.10.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19b3b9ccf97af2b7519c42032441a891a5e05c68368f40865a90eb88833c2559"},
+ {file = "pydantic-1.10.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9069e1b01525a96e6ff49e25876d90d5a563bc31c658289a8772ae186552236"},
+ {file = "pydantic-1.10.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:355639d9afc76bcb9b0c3000ddcd08472ae75318a6eb67a15866b87e2efa168c"},
+ {file = "pydantic-1.10.2-cp310-cp310-win_amd64.whl", hash = "sha256:ae544c47bec47a86bc7d350f965d8b15540e27e5aa4f55170ac6a75e5f73b644"},
+ {file = "pydantic-1.10.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a4c805731c33a8db4b6ace45ce440c4ef5336e712508b4d9e1aafa617dc9907f"},
+ {file = "pydantic-1.10.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d49f3db871575e0426b12e2f32fdb25e579dea16486a26e5a0474af87cb1ab0a"},
+ {file = "pydantic-1.10.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37c90345ec7dd2f1bcef82ce49b6235b40f282b94d3eec47e801baf864d15525"},
+ {file = "pydantic-1.10.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b5ba54d026c2bd2cb769d3468885f23f43710f651688e91f5fb1edcf0ee9283"},
+ {file = "pydantic-1.10.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:05e00dbebbe810b33c7a7362f231893183bcc4251f3f2ff991c31d5c08240c42"},
+ {file = "pydantic-1.10.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2d0567e60eb01bccda3a4df01df677adf6b437958d35c12a3ac3e0f078b0ee52"},
+ {file = "pydantic-1.10.2-cp311-cp311-win_amd64.whl", hash = "sha256:c6f981882aea41e021f72779ce2a4e87267458cc4d39ea990729e21ef18f0f8c"},
+ {file = "pydantic-1.10.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4aac8e7103bf598373208f6299fa9a5cfd1fc571f2d40bf1dd1955a63d6eeb5"},
+ {file = "pydantic-1.10.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a7b66c3f499108b448f3f004801fcd7d7165fb4200acb03f1c2402da73ce4c"},
+ {file = "pydantic-1.10.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bedf309630209e78582ffacda64a21f96f3ed2e51fbf3962d4d488e503420254"},
+ {file = "pydantic-1.10.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9300fcbebf85f6339a02c6994b2eb3ff1b9c8c14f502058b5bf349d42447dcf5"},
+ {file = "pydantic-1.10.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:216f3bcbf19c726b1cc22b099dd409aa371f55c08800bcea4c44c8f74b73478d"},
+ {file = "pydantic-1.10.2-cp37-cp37m-win_amd64.whl", hash = "sha256:dd3f9a40c16daf323cf913593083698caee97df2804aa36c4b3175d5ac1b92a2"},
+ {file = "pydantic-1.10.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b97890e56a694486f772d36efd2ba31612739bc6f3caeee50e9e7e3ebd2fdd13"},
+ {file = "pydantic-1.10.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9cabf4a7f05a776e7793e72793cd92cc865ea0e83a819f9ae4ecccb1b8aa6116"},
+ {file = "pydantic-1.10.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06094d18dd5e6f2bbf93efa54991c3240964bb663b87729ac340eb5014310624"},
+ {file = "pydantic-1.10.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc78cc83110d2f275ec1970e7a831f4e371ee92405332ebfe9860a715f8336e1"},
+ {file = "pydantic-1.10.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ee433e274268a4b0c8fde7ad9d58ecba12b069a033ecc4645bb6303c062d2e9"},
+ {file = "pydantic-1.10.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7c2abc4393dea97a4ccbb4ec7d8658d4e22c4765b7b9b9445588f16c71ad9965"},
+ {file = "pydantic-1.10.2-cp38-cp38-win_amd64.whl", hash = "sha256:0b959f4d8211fc964772b595ebb25f7652da3f22322c007b6fed26846a40685e"},
+ {file = "pydantic-1.10.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c33602f93bfb67779f9c507e4d69451664524389546bacfe1bee13cae6dc7488"},
+ {file = "pydantic-1.10.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5760e164b807a48a8f25f8aa1a6d857e6ce62e7ec83ea5d5c5a802eac81bad41"},
+ {file = "pydantic-1.10.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6eb843dcc411b6a2237a694f5e1d649fc66c6064d02b204a7e9d194dff81eb4b"},
+ {file = "pydantic-1.10.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b8795290deaae348c4eba0cebb196e1c6b98bdbe7f50b2d0d9a4a99716342fe"},
+ {file = "pydantic-1.10.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e0bedafe4bc165ad0a56ac0bd7695df25c50f76961da29c050712596cf092d6d"},
+ {file = "pydantic-1.10.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2e05aed07fa02231dbf03d0adb1be1d79cabb09025dd45aa094aa8b4e7b9dcda"},
+ {file = "pydantic-1.10.2-cp39-cp39-win_amd64.whl", hash = "sha256:c1ba1afb396148bbc70e9eaa8c06c1716fdddabaf86e7027c5988bae2a829ab6"},
+ {file = "pydantic-1.10.2-py3-none-any.whl", hash = "sha256:1b6ee725bd6e83ec78b1aa32c5b1fa67a3a65badddde3976bca5fe4568f27709"},
+ {file = "pydantic-1.10.2.tar.gz", hash = "sha256:91b8e218852ef6007c2b98cd861601c6a09f1aa32bbbb74fab5b1c33d4a1e410"},
]
pyflakes = [
- {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"},
- {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"},
+ {file = "pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2"},
+ {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"},
]
-pygithub = [
- {file = "PyGithub-1.55-py3-none-any.whl", hash = "sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b"},
- {file = "PyGithub-1.55.tar.gz", hash = "sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283"},
+PyGithub = [
+ {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"},
+ {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"},
]
-pygments = [
- {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
- {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"},
+Pygments = [
+ {file = "Pygments-2.13.0-py3-none-any.whl", hash = "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"},
+ {file = "Pygments-2.13.0.tar.gz", hash = "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1"},
]
-pyjwt = [
- {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"},
- {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"},
+PyICU = [
+ {file = "PyICU-2.10.2.tar.gz", hash = "sha256:0c3309eea7fab6857507ace62403515b60fe096cbfb4f90d14f55ff75c5441c1"},
+]
+PyJWT = [
+ {file = "PyJWT-2.6.0-py3-none-any.whl", hash = "sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14"},
+ {file = "PyJWT-2.6.0.tar.gz", hash = "sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd"},
]
pymacaroons = [
{file = "pymacaroons-0.13.0-py2.py3-none-any.whl", hash = "sha256:3e14dff6a262fdbf1a15e769ce635a8aea72e6f8f91e408f9a97166c53b91907"},
{file = "pymacaroons-0.13.0.tar.gz", hash = "sha256:1e6bba42a5f66c245adf38a5a4006a99dcc06a0703786ea636098667d42903b8"},
]
-pympler = [
+Pympler = [
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
]
-pynacl = [
+PyNaCl = [
{file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"},
{file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"},
{file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"},
@@ -2439,54 +2562,51 @@ pynacl = [
{file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"},
{file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"},
]
-pyopenssl = [
- {file = "pyOpenSSL-22.0.0-py2.py3-none-any.whl", hash = "sha256:ea252b38c87425b64116f808355e8da644ef9b07e429398bfece610f893ee2e0"},
- {file = "pyOpenSSL-22.0.0.tar.gz", hash = "sha256:660b1b1425aac4a1bea1d94168a85d99f0b3144c869dd4390d27629d0087f1bf"},
-]
-pyparsing = [
- {file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"},
- {file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"},
+pyOpenSSL = [
+ {file = "pyOpenSSL-22.1.0-py3-none-any.whl", hash = "sha256:b28437c9773bb6c6958628cf9c3bebe585de661dba6f63df17111966363dd15e"},
+ {file = "pyOpenSSL-22.1.0.tar.gz", hash = "sha256:7a83b7b272dd595222d672f5ce29aa030f1fb837630ef229f62e72e395ce8968"},
]
pyrsistent = [
- {file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"},
- {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26"},
- {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e"},
- {file = "pyrsistent-0.18.1-cp310-cp310-win32.whl", hash = "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6"},
- {file = "pyrsistent-0.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec"},
- {file = "pyrsistent-0.18.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b"},
- {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc"},
- {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22"},
- {file = "pyrsistent-0.18.1-cp37-cp37m-win32.whl", hash = "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8"},
- {file = "pyrsistent-0.18.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286"},
- {file = "pyrsistent-0.18.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"},
- {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec"},
- {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c"},
- {file = "pyrsistent-0.18.1-cp38-cp38-win32.whl", hash = "sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca"},
- {file = "pyrsistent-0.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a"},
- {file = "pyrsistent-0.18.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5"},
- {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045"},
- {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c"},
- {file = "pyrsistent-0.18.1-cp39-cp39-win32.whl", hash = "sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc"},
- {file = "pyrsistent-0.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07"},
- {file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"},
+ {file = "pyrsistent-0.19.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d6982b5a0237e1b7d876b60265564648a69b14017f3b5f908c5be2de3f9abb7a"},
+ {file = "pyrsistent-0.19.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:187d5730b0507d9285a96fca9716310d572e5464cadd19f22b63a6976254d77a"},
+ {file = "pyrsistent-0.19.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed"},
+ {file = "pyrsistent-0.19.2-cp310-cp310-win32.whl", hash = "sha256:456cb30ca8bff00596519f2c53e42c245c09e1a4543945703acd4312949bfd41"},
+ {file = "pyrsistent-0.19.2-cp310-cp310-win_amd64.whl", hash = "sha256:b39725209e06759217d1ac5fcdb510e98670af9e37223985f330b611f62e7425"},
+ {file = "pyrsistent-0.19.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2aede922a488861de0ad00c7630a6e2d57e8023e4be72d9d7147a9fcd2d30712"},
+ {file = "pyrsistent-0.19.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:879b4c2f4d41585c42df4d7654ddffff1239dc4065bc88b745f0341828b83e78"},
+ {file = "pyrsistent-0.19.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c43bec251bbd10e3cb58ced80609c5c1eb238da9ca78b964aea410fb820d00d6"},
+ {file = "pyrsistent-0.19.2-cp37-cp37m-win32.whl", hash = "sha256:d690b18ac4b3e3cab73b0b7aa7dbe65978a172ff94970ff98d82f2031f8971c2"},
+ {file = "pyrsistent-0.19.2-cp37-cp37m-win_amd64.whl", hash = "sha256:3ba4134a3ff0fc7ad225b6b457d1309f4698108fb6b35532d015dca8f5abed73"},
+ {file = "pyrsistent-0.19.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a178209e2df710e3f142cbd05313ba0c5ebed0a55d78d9945ac7a4e09d923308"},
+ {file = "pyrsistent-0.19.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e371b844cec09d8dc424d940e54bba8f67a03ebea20ff7b7b0d56f526c71d584"},
+ {file = "pyrsistent-0.19.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111156137b2e71f3a9936baf27cb322e8024dac3dc54ec7fb9f0bcf3249e68bb"},
+ {file = "pyrsistent-0.19.2-cp38-cp38-win32.whl", hash = "sha256:e5d8f84d81e3729c3b506657dddfe46e8ba9c330bf1858ee33108f8bb2adb38a"},
+ {file = "pyrsistent-0.19.2-cp38-cp38-win_amd64.whl", hash = "sha256:9cd3e9978d12b5d99cbdc727a3022da0430ad007dacf33d0bf554b96427f33ab"},
+ {file = "pyrsistent-0.19.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f1258f4e6c42ad0b20f9cfcc3ada5bd6b83374516cd01c0960e3cb75fdca6770"},
+ {file = "pyrsistent-0.19.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21455e2b16000440e896ab99e8304617151981ed40c29e9507ef1c2e4314ee95"},
+ {file = "pyrsistent-0.19.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfd880614c6237243ff53a0539f1cb26987a6dc8ac6e66e0c5a40617296a045e"},
+ {file = "pyrsistent-0.19.2-cp39-cp39-win32.whl", hash = "sha256:71d332b0320642b3261e9fee47ab9e65872c2bd90260e5d225dabeed93cbd42b"},
+ {file = "pyrsistent-0.19.2-cp39-cp39-win_amd64.whl", hash = "sha256:dec3eac7549869365fe263831f576c8457f6c833937c68542d08fde73457d291"},
+ {file = "pyrsistent-0.19.2-py3-none-any.whl", hash = "sha256:ea6b79a02a28550c98b6ca9c35b9f492beaa54d7c5c9e9949555893c8a9234d0"},
+ {file = "pyrsistent-0.19.2.tar.gz", hash = "sha256:bfa0351be89c9fcbcb8c9879b826f4353be10f58f8a677efab0c017bf7137ec2"},
]
pysaml2 = [
- {file = "pysaml2-7.1.2-py2.py3-none-any.whl", hash = "sha256:d915961aaa4d4d97d952b30fe5d18d64cf053465acf3e38d8090b36c5ff08325"},
- {file = "pysaml2-7.1.2.tar.gz", hash = "sha256:1ec94442306511b93fe7a5710f224e05e0aba948682d506614d1e04f3232f827"},
+ {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"},
+ {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"},
]
python-dateutil = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
]
pytz = [
- {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"},
- {file = "pytz-2021.3.tar.gz", hash = "sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326"},
+ {file = "pytz-2022.6-py2.py3-none-any.whl", hash = "sha256:222439474e9c98fced559f1709d89e6c9cbf8d79c794ff3eb9f8800064291427"},
+ {file = "pytz-2022.6.tar.gz", hash = "sha256:e89512406b793ca39f5971bc999cc538ce125c0e51c27941bef4568b460095e2"},
]
pywin32-ctypes = [
{file = "pywin32-ctypes-0.2.0.tar.gz", hash = "sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942"},
{file = "pywin32_ctypes-0.2.0-py2.py3-none-any.whl", hash = "sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98"},
]
-pyyaml = [
+PyYAML = [
{file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
{file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
{file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
@@ -2529,111 +2649,115 @@ pyyaml = [
{file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
]
readme-renderer = [
- {file = "readme_renderer-33.0-py3-none-any.whl", hash = "sha256:f02cee0c4de9636b5a62b6be50c9742427ba1b956aad1d938bfb087d0d72ccdf"},
- {file = "readme_renderer-33.0.tar.gz", hash = "sha256:e3b53bc84bd6af054e4cc1fe3567dc1ae19f554134221043a3f8c674e22209db"},
+ {file = "readme_renderer-37.3-py3-none-any.whl", hash = "sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343"},
+ {file = "readme_renderer-37.3.tar.gz", hash = "sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273"},
]
requests = [
- {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"},
- {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"},
+ {file = "requests-2.28.1-py3-none-any.whl", hash = "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"},
+ {file = "requests-2.28.1.tar.gz", hash = "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983"},
]
requests-toolbelt = [
- {file = "requests-toolbelt-0.9.1.tar.gz", hash = "sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0"},
- {file = "requests_toolbelt-0.9.1-py2.py3-none-any.whl", hash = "sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f"},
+ {file = "requests-toolbelt-0.10.1.tar.gz", hash = "sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d"},
+ {file = "requests_toolbelt-0.10.1-py2.py3-none-any.whl", hash = "sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7"},
]
rfc3986 = [
{file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"},
{file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"},
]
-secretstorage = [
- {file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"},
- {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
+rich = [
+ {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"},
+ {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"},
+]
+SecretStorage = [
+ {file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"},
+ {file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"},
]
semantic-version = [
{file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"},
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
]
sentry-sdk = [
- {file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"},
- {file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"},
+ {file = "sentry-sdk-1.11.1.tar.gz", hash = "sha256:675f6279b6bb1fea09fd61751061f9a90dca3b5929ef631dd50dc8b3aeb245e9"},
+ {file = "sentry_sdk-1.11.1-py2.py3-none-any.whl", hash = "sha256:8b4ff696c0bdcceb3f70bbb87a57ba84fd3168b1332d493fcd16c137f709578c"},
]
service-identity = [
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
{file = "service_identity-21.1.0-py2.py3-none-any.whl", hash = "sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db"},
]
setuptools = [
- {file = "setuptools-65.3.0-py3-none-any.whl", hash = "sha256:2e24e0bec025f035a2e72cdd1961119f557d78ad331bb00ff82efb2ab8da8e82"},
- {file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"},
+ {file = "setuptools-65.6.3-py3-none-any.whl", hash = "sha256:57f6f22bde4e042978bcd50176fdb381d7c21a9efa4041202288d3737a0c6a54"},
+ {file = "setuptools-65.6.3.tar.gz", hash = "sha256:a7620757bf984b58deaf32fc8a4577a9bbc0850cf92c20e1ce41c38c19e5fb75"},
]
setuptools-rust = [
- {file = "setuptools-rust-1.5.1.tar.gz", hash = "sha256:0e05e456645d59429cb1021370aede73c0760e9360bbfdaaefb5bced530eb9d7"},
- {file = "setuptools_rust-1.5.1-py3-none-any.whl", hash = "sha256:306b236ff3aa5229180e58292610d0c2c51bb488191122d2fc559ae4caeb7d5e"},
+ {file = "setuptools-rust-1.5.2.tar.gz", hash = "sha256:d8daccb14dc0eae1b6b6eb3ecef79675bd37b4065369f79c35393dd5c55652c7"},
+ {file = "setuptools_rust-1.5.2-py3-none-any.whl", hash = "sha256:8eb45851e34288f2296cd5ab9e924535ac1757318b730a13fe6836867843f206"},
]
signedjson = [
{file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"},
{file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"},
]
simplejson = [
- {file = "simplejson-3.17.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a89acae02b2975b1f8e4974cb8cdf9bf9f6c91162fb8dec50c259ce700f2770a"},
- {file = "simplejson-3.17.6-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:82ff356ff91be0ab2293fc6d8d262451eb6ac4fd999244c4b5f863e049ba219c"},
- {file = "simplejson-3.17.6-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:0de783e9c2b87bdd75b57efa2b6260c24b94605b5c9843517577d40ee0c3cc8a"},
- {file = "simplejson-3.17.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:d24a9e61df7a7787b338a58abfba975414937b609eb6b18973e25f573bc0eeeb"},
- {file = "simplejson-3.17.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:e8603e691580487f11306ecb066c76f1f4a8b54fb3bdb23fa40643a059509366"},
- {file = "simplejson-3.17.6-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:9b01e7b00654115965a206e3015f0166674ec1e575198a62a977355597c0bef5"},
- {file = "simplejson-3.17.6-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:37bc0cf0e5599f36072077e56e248f3336917ded1d33d2688624d8ed3cefd7d2"},
- {file = "simplejson-3.17.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:cf6e7d5fe2aeb54898df18db1baf479863eae581cce05410f61f6b4188c8ada1"},
- {file = "simplejson-3.17.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:bdfc54b4468ed4cd7415928cbe782f4d782722a81aeb0f81e2ddca9932632211"},
- {file = "simplejson-3.17.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd16302d39c4d6f4afde80edd0c97d4db643327d355a312762ccd9bd2ca515ed"},
- {file = "simplejson-3.17.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:deac4bdafa19bbb89edfb73b19f7f69a52d0b5bd3bb0c4ad404c1bbfd7b4b7fd"},
- {file = "simplejson-3.17.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8bbdb166e2fb816e43ab034c865147edafe28e1b19c72433147789ac83e2dda"},
- {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7854326920d41c3b5d468154318fe6ba4390cb2410480976787c640707e0180"},
- {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:04e31fa6ac8e326480703fb6ded1488bfa6f1d3f760d32e29dbf66d0838982ce"},
- {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f63600ec06982cdf480899026f4fda622776f5fabed9a869fdb32d72bc17e99a"},
- {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e03c3b8cc7883a54c3f34a6a135c4a17bc9088a33f36796acdb47162791b02f6"},
- {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a2d30d6c1652140181dc6861f564449ad71a45e4f165a6868c27d36745b65d40"},
- {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a1aa6e4cae8e3b8d5321be4f51c5ce77188faf7baa9fe1e78611f93a8eed2882"},
- {file = "simplejson-3.17.6-cp310-cp310-win32.whl", hash = "sha256:97202f939c3ff341fc3fa84d15db86156b1edc669424ba20b0a1fcd4a796a045"},
- {file = "simplejson-3.17.6-cp310-cp310-win_amd64.whl", hash = "sha256:80d3bc9944be1d73e5b1726c3bbfd2628d3d7fe2880711b1eb90b617b9b8ac70"},
- {file = "simplejson-3.17.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9fa621b3c0c05d965882c920347b6593751b7ab20d8fa81e426f1735ca1a9fc7"},
- {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd2fb11922f58df8528adfca123f6a84748ad17d066007e7ac977720063556bd"},
- {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:724c1fe135aa437d5126138d977004d165a3b5e2ee98fc4eb3e7c0ef645e7e27"},
- {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4ff4ac6ff3aa8f814ac0f50bf218a2e1a434a17aafad4f0400a57a8cc62ef17f"},
- {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:67093a526e42981fdd954868062e56c9b67fdd7e712616cc3265ad0c210ecb51"},
- {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:5d6b4af7ad7e4ac515bc6e602e7b79e2204e25dbd10ab3aa2beef3c5a9cad2c7"},
- {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:1c9b1ed7ed282b36571638297525f8ef80f34b3e2d600a56f962c6044f24200d"},
- {file = "simplejson-3.17.6-cp36-cp36m-win32.whl", hash = "sha256:632ecbbd2228575e6860c9e49ea3cc5423764d5aa70b92acc4e74096fb434044"},
- {file = "simplejson-3.17.6-cp36-cp36m-win_amd64.whl", hash = "sha256:4c09868ddb86bf79b1feb4e3e7e4a35cd6e61ddb3452b54e20cf296313622566"},
- {file = "simplejson-3.17.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b6bd8144f15a491c662f06814bd8eaa54b17f26095bb775411f39bacaf66837"},
- {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5decdc78849617917c206b01e9fc1d694fd58caa961be816cb37d3150d613d9a"},
- {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:521877c7bd060470806eb6335926e27453d740ac1958eaf0d8c00911bc5e1802"},
- {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:65b998193bd7b0c7ecdfffbc825d808eac66279313cb67d8892bb259c9d91494"},
- {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ac786f6cb7aa10d44e9641c7a7d16d7f6e095b138795cd43503769d4154e0dc2"},
- {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3ff5b3464e1ce86a8de8c88e61d4836927d5595c2162cab22e96ff551b916e81"},
- {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:69bd56b1d257a91e763256d63606937ae4eb890b18a789b66951c00062afec33"},
- {file = "simplejson-3.17.6-cp37-cp37m-win32.whl", hash = "sha256:b81076552d34c27e5149a40187a8f7e2abb2d3185576a317aaf14aeeedad862a"},
- {file = "simplejson-3.17.6-cp37-cp37m-win_amd64.whl", hash = "sha256:07ecaafc1b1501f275bf5acdee34a4ad33c7c24ede287183ea77a02dc071e0c0"},
- {file = "simplejson-3.17.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:068670af975247acbb9fc3d5393293368cda17026db467bf7a51548ee8f17ee1"},
- {file = "simplejson-3.17.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4d1c135af0c72cb28dd259cf7ba218338f4dc027061262e46fe058b4e6a4c6a3"},
- {file = "simplejson-3.17.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23fe704da910ff45e72543cbba152821685a889cf00fc58d5c8ee96a9bad5f94"},
- {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f444762fed1bc1fd75187ef14a20ed900c1fbb245d45be9e834b822a0223bc81"},
- {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:681eb4d37c9a9a6eb9b3245a5e89d7f7b2b9895590bb08a20aa598c1eb0a1d9d"},
- {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8e8607d8f6b4f9d46fee11447e334d6ab50e993dd4dbfb22f674616ce20907ab"},
- {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b10556817f09d46d420edd982dd0653940b90151d0576f09143a8e773459f6fe"},
- {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e1ec8a9ee0987d4524ffd6299e778c16cc35fef6d1a2764e609f90962f0b293a"},
- {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0b4126cac7d69ac06ff22efd3e0b3328a4a70624fcd6bca4fc1b4e6d9e2e12bf"},
- {file = "simplejson-3.17.6-cp38-cp38-win32.whl", hash = "sha256:35a49ebef25f1ebdef54262e54ae80904d8692367a9f208cdfbc38dbf649e00a"},
- {file = "simplejson-3.17.6-cp38-cp38-win_amd64.whl", hash = "sha256:743cd768affaa508a21499f4858c5b824ffa2e1394ed94eb85caf47ac0732198"},
- {file = "simplejson-3.17.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb62d517a516128bacf08cb6a86ecd39fb06d08e7c4980251f5d5601d29989ba"},
- {file = "simplejson-3.17.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:12133863178a8080a3dccbf5cb2edfab0001bc41e5d6d2446af2a1131105adfe"},
- {file = "simplejson-3.17.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5540fba2d437edaf4aa4fbb80f43f42a8334206ad1ad3b27aef577fd989f20d9"},
- {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d74ee72b5071818a1a5dab47338e87f08a738cb938a3b0653b9e4d959ddd1fd9"},
- {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:28221620f4dcabdeac310846629b976e599a13f59abb21616356a85231ebd6ad"},
- {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b09bc62e5193e31d7f9876220fb429ec13a6a181a24d897b9edfbbdbcd678851"},
- {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7255a37ff50593c9b2f1afa8fafd6ef5763213c1ed5a9e2c6f5b9cc925ab979f"},
- {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:401d40969cee3df7bda211e57b903a534561b77a7ade0dd622a8d1a31eaa8ba7"},
- {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a649d0f66029c7eb67042b15374bd93a26aae202591d9afd71e111dd0006b198"},
- {file = "simplejson-3.17.6-cp39-cp39-win32.whl", hash = "sha256:522fad7be85de57430d6d287c4b635813932946ebf41b913fe7e880d154ade2e"},
- {file = "simplejson-3.17.6-cp39-cp39-win_amd64.whl", hash = "sha256:3fe87570168b2ae018391e2b43fbf66e8593a86feccb4b0500d134c998983ccc"},
- {file = "simplejson-3.17.6.tar.gz", hash = "sha256:cf98038d2abf63a1ada5730e91e84c642ba6c225b0198c3684151b1f80c5f8a6"},
+ {file = "simplejson-3.18.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:17dbc7f71fa5b7e4a2acef38cf0be30461ae6659456a978ce7eeebeb5bdf9e1a"},
+ {file = "simplejson-3.18.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:7a4d9b266ae6db578719f1255c742e76ee4676593087f4f6b79a2bbae2b1dcc5"},
+ {file = "simplejson-3.18.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:8493d2c1a940471b07d7c9c356a3f4eee780df073da2917418d0fe8669b54f99"},
+ {file = "simplejson-3.18.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:448ab14fa67b3ac235a8445d14ec6d56268c3dabbce78720f9efa6d698466710"},
+ {file = "simplejson-3.18.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:989b31d586954e65170ad3ec597218a6790c401b82da6193e8a897a06aa7946e"},
+ {file = "simplejson-3.18.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:002f069c7bb9a86826616a78f1214fea5b993435720990eecb0bf10955b9cd0e"},
+ {file = "simplejson-3.18.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:db53a85f4db0dbd9e5f6277d9153bcaa2ccb87b0d672c6a35f19432b3f2301a3"},
+ {file = "simplejson-3.18.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:0f33d16fa7b5e2ed6ea85d7b31bc84cf8c73c40cc2c9f87071e0fffcd52f5342"},
+ {file = "simplejson-3.18.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:4d8d016f70d241f82189bc9f6d1eb8558b3599861f2c501b3f32da7fdf4e92ac"},
+ {file = "simplejson-3.18.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:46bafa7e794f0e91fde850d906b0dc29a624c726b27e75d23bc8c3e35a48f28b"},
+ {file = "simplejson-3.18.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:94c17d01e4c65e63deec46c984bb810de5e3a1259eb6bacdca63f3efc9c4c673"},
+ {file = "simplejson-3.18.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6804ad50aaf581df5c982fc101b0d932638066fe191074ded783602eb1c8982a"},
+ {file = "simplejson-3.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6187cbea7fdede732fe0347ad08cd920ebd9faa30b6c48782cee494051ca97c6"},
+ {file = "simplejson-3.18.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2aeed35db00cdf5d49ff1e7d878afd38c86a5fead0f1d364d539ad4d7a869e0e"},
+ {file = "simplejson-3.18.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0f49858b5fc802081b71269f4a3aa5c5500ec6553637c9a0630f30a2a6541ea7"},
+ {file = "simplejson-3.18.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:503da91993cc671fe7ebbf120c3ce868278de8226f158336afde874f7b7aa871"},
+ {file = "simplejson-3.18.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f0e12bdafdf7e32c5ad4a073e325ea0d659d4277af8b3d8eccf3101c56879619"},
+ {file = "simplejson-3.18.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d9f7a692c11de20cb8ec680584815315e03d1404a6e299d36489b0fb6447d98d"},
+ {file = "simplejson-3.18.0-cp310-cp310-win32.whl", hash = "sha256:2cc76435569e6c19574a8e913cfccbed832249b2b3b360caee9a4caf8ff866bf"},
+ {file = "simplejson-3.18.0-cp310-cp310-win_amd64.whl", hash = "sha256:4a6199d302ec7d889e1aa6b493aa8e40b4dfa4bd85708f8c8f0c64ce5b8e0986"},
+ {file = "simplejson-3.18.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:98b4c824f15436f1b22fe6d73c42ffacb246f7efc4d9dbbee542dd72355ecc43"},
+ {file = "simplejson-3.18.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9fa2ad4cabb5054faa8d4a44b84134b0ec9d1421f5e9264d057d6be4d13c7fa"},
+ {file = "simplejson-3.18.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d762267c4af617e1798bd0151f626105d06a88f214e3874b77eb89106f899fe"},
+ {file = "simplejson-3.18.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:07e408222931b1a2aab71e60e5f169fa7c0d74cacd4e0a6a0199716cb18dad76"},
+ {file = "simplejson-3.18.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a2f70d8170c7e02166a4c91462581e6ae5f35e3351a6b6c5142adcb04c7153ac"},
+ {file = "simplejson-3.18.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:b71fef8ee41d59509c7f4afac7f627ed143c9e6db9eb08cfbba85e4c4dc5e67b"},
+ {file = "simplejson-3.18.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:7b95c5cf71c16e4fdaa724719aaf8ccbed533e2df57a20bcff825ceeead27688"},
+ {file = "simplejson-3.18.0-cp36-cp36m-win32.whl", hash = "sha256:622cf0e1f870f189a0757fdcad7998a0c1dd46b0e53aeac9960556c141319c83"},
+ {file = "simplejson-3.18.0-cp36-cp36m-win_amd64.whl", hash = "sha256:96979ff7f0daf47422d5f95d2d006da3210e0490a166bce2529f59f55047fc67"},
+ {file = "simplejson-3.18.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:252f7cc5524bb5507a08377a4a75aa7ff4645f3dfca814d38bdbcf0f3c34d1ce"},
+ {file = "simplejson-3.18.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2b19d7aa4e9a1e7bf8caaf5f478a790190c60136314f45bb7702cb5a9337266"},
+ {file = "simplejson-3.18.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d5d25cc5dad31a10d7a8196125515cc3aa68187c8953459fcaf127c2c8410f51"},
+ {file = "simplejson-3.18.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a814227fa08cae435ac7a42dcd2a04a7ec4a3cee23b7f83f9544cd26f452dcc4"},
+ {file = "simplejson-3.18.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:638bdd2deaccd3b8e02b1783280bd82341df5e1faa59c4f0276f03f16eec13ea"},
+ {file = "simplejson-3.18.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bd67d6fad7f4cd7c9cb7fad32d78ce32862fdb574b898447987a5de22fd37d73"},
+ {file = "simplejson-3.18.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9aff3c24017a7819c76b2f177d4fe8334b3d4cb6f702a2d7c666b3d57c36ffb4"},
+ {file = "simplejson-3.18.0-cp37-cp37m-win32.whl", hash = "sha256:17ec5e408fb6615250c1f18fb4eac3b2b99a85e8613bfc2dfa54827d0bf7f3e1"},
+ {file = "simplejson-3.18.0-cp37-cp37m-win_amd64.whl", hash = "sha256:97139bf5134d713710665a6edb9500d69b93642c4b6b44b20800232dbd0f5b39"},
+ {file = "simplejson-3.18.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7a9476dcd72aeba7d55c4800b9cd2204201af3539894b8512d74597e35a3033a"},
+ {file = "simplejson-3.18.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca22993a1a00440392c6c76f39addab8d97c706d2a8bcc2c9b2b6cb2cd7f41df"},
+ {file = "simplejson-3.18.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a0e6dd5a0b8c76fb7522470789f1af793d39d6edbd4e40853e7be550ad49c430"},
+ {file = "simplejson-3.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54c63cc7857f16a20aa170ffda9ebce45a3b7ba764b67a5a95bfe7ae613a2710"},
+ {file = "simplejson-3.18.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5f3dd31309ae5cc9f2df51d2d5cac89722dac3c853042ebefcaf7ad06ca19387"},
+ {file = "simplejson-3.18.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4609feb2ae66c132c6dcbe01dbfd4f6431afb4ff17303e37ca128fb6297cebd2"},
+ {file = "simplejson-3.18.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9db78e18624f94d7b5642bf487244f803dab844e771d92e83f85f22da21ffe2d"},
+ {file = "simplejson-3.18.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1fbacdbba3cf5a471c67a9ca6cd270bba9578d5bc22aef6028faebbdb98bbb15"},
+ {file = "simplejson-3.18.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa9ecdd1d7ecbc7d1066c37cfbe52f65adf64b11b22d481a98fe1d3675dfff4b"},
+ {file = "simplejson-3.18.0-cp38-cp38-win32.whl", hash = "sha256:875cfb43b622672218045dc927a86fc7c4c8111264c1d303aca5de33d5df479e"},
+ {file = "simplejson-3.18.0-cp38-cp38-win_amd64.whl", hash = "sha256:0de746c8f76355c79fd15eccd7ecde0b137cd911bdcdc463fc5c36ec3d8b98ea"},
+ {file = "simplejson-3.18.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:00b673f0b3caf37a3d993bccf30a97290da6313b6ecc7d66937e9cd906d8f840"},
+ {file = "simplejson-3.18.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d522f28f7b252454df86ac3db5a0e1fe5ae03c8fc0cd1592c912b07c9fad6c29"},
+ {file = "simplejson-3.18.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e80f02e68d25c222471fcc5d1933275b8eb396e5e40b7863e4e0a43b3c810059"},
+ {file = "simplejson-3.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f169402069f8cf93e359f607725b1d920c4dbe5bda4c520025d5fad8d20c1b7"},
+ {file = "simplejson-3.18.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e84bd1c29e83ec74a95de070473742eb52d08502f2428eff5751671081e0a0a6"},
+ {file = "simplejson-3.18.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fd56a9e0c63a1f9c37621fe298c77795aefd2a26dca80dcae27688586c40b4bb"},
+ {file = "simplejson-3.18.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:db9d36c4c7997c2a2513a5d218fd90b53bfeaf7e727f94aaf3576973378b3bce"},
+ {file = "simplejson-3.18.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce1c0580372d3c9bfa151bd0721a9bd5647b9b2245d0588d813fdbd2eb5d6f22"},
+ {file = "simplejson-3.18.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:599e9c53d3203bc36ef68efec138ca76d201da7ac06a114fae78536a8c10e35b"},
+ {file = "simplejson-3.18.0-cp39-cp39-win32.whl", hash = "sha256:1b4085151e00ab7ca66f269aff7153f0ec18589cb22e7ceb8b365709c723fdd0"},
+ {file = "simplejson-3.18.0-cp39-cp39-win_amd64.whl", hash = "sha256:b4997bd8332cef3923402a07351571788f552f55ea1394ffbfccd4d203a8a05f"},
+ {file = "simplejson-3.18.0.tar.gz", hash = "sha256:58a429d2c2fa80834115b923ff689622de8f214cf0dc4afa9f59e824b444ab31"},
]
six = [
{file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
@@ -2655,74 +2779,40 @@ threadloop = [
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
]
thrift = [
- {file = "thrift-0.15.0.tar.gz", hash = "sha256:87c8205a71cf8bbb111cb99b1f7495070fbc9cabb671669568854210da5b3e29"},
+ {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
tomli = [
- {file = "tomli-1.2.3-py3-none-any.whl", hash = "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"},
- {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"},
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]
tornado = [
- {file = "tornado-6.1-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32"},
- {file = "tornado-6.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c"},
- {file = "tornado-6.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9de9e5188a782be6b1ce866e8a51bc76a0fbaa0e16613823fc38e4fc2556ad05"},
- {file = "tornado-6.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:61b32d06ae8a036a6607805e6720ef00a3c98207038444ba7fd3d169cd998910"},
- {file = "tornado-6.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:3e63498f680547ed24d2c71e6497f24bca791aca2fe116dbc2bd0ac7f191691b"},
- {file = "tornado-6.1-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:6c77c9937962577a6a76917845d06af6ab9197702a42e1346d8ae2e76b5e3675"},
- {file = "tornado-6.1-cp35-cp35m-win32.whl", hash = "sha256:6286efab1ed6e74b7028327365cf7346b1d777d63ab30e21a0f4d5b275fc17d5"},
- {file = "tornado-6.1-cp35-cp35m-win_amd64.whl", hash = "sha256:fa2ba70284fa42c2a5ecb35e322e68823288a4251f9ba9cc77be04ae15eada68"},
- {file = "tornado-6.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0a00ff4561e2929a2c37ce706cb8233b7907e0cdc22eab98888aca5dd3775feb"},
- {file = "tornado-6.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:748290bf9112b581c525e6e6d3820621ff020ed95af6f17fedef416b27ed564c"},
- {file = "tornado-6.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e385b637ac3acaae8022e7e47dfa7b83d3620e432e3ecb9a3f7f58f150e50921"},
- {file = "tornado-6.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:25ad220258349a12ae87ede08a7b04aca51237721f63b1808d39bdb4b2164558"},
- {file = "tornado-6.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:65d98939f1a2e74b58839f8c4dab3b6b3c1ce84972ae712be02845e65391ac7c"},
- {file = "tornado-6.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:e519d64089b0876c7b467274468709dadf11e41d65f63bba207e04217f47c085"},
- {file = "tornado-6.1-cp36-cp36m-win32.whl", hash = "sha256:b87936fd2c317b6ee08a5741ea06b9d11a6074ef4cc42e031bc6403f82a32575"},
- {file = "tornado-6.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cc0ee35043162abbf717b7df924597ade8e5395e7b66d18270116f8745ceb795"},
- {file = "tornado-6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7250a3fa399f08ec9cb3f7b1b987955d17e044f1ade821b32e5f435130250d7f"},
- {file = "tornado-6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:ed3ad863b1b40cd1d4bd21e7498329ccaece75db5a5bf58cd3c9f130843e7102"},
- {file = "tornado-6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:dcef026f608f678c118779cd6591c8af6e9b4155c44e0d1bc0c87c036fb8c8c4"},
- {file = "tornado-6.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:70dec29e8ac485dbf57481baee40781c63e381bebea080991893cd297742b8fd"},
- {file = "tornado-6.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d3f7594930c423fd9f5d1a76bee85a2c36fd8b4b16921cae7e965f22575e9c01"},
- {file = "tornado-6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3447475585bae2e77ecb832fc0300c3695516a47d46cefa0528181a34c5b9d3d"},
- {file = "tornado-6.1-cp37-cp37m-win32.whl", hash = "sha256:e7229e60ac41a1202444497ddde70a48d33909e484f96eb0da9baf8dc68541df"},
- {file = "tornado-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:cb5ec8eead331e3bb4ce8066cf06d2dfef1bfb1b2a73082dfe8a161301b76e37"},
- {file = "tornado-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:20241b3cb4f425e971cb0a8e4ffc9b0a861530ae3c52f2b0434e6c1b57e9fd95"},
- {file = "tornado-6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c77da1263aa361938476f04c4b6c8916001b90b2c2fdd92d8d535e1af48fba5a"},
- {file = "tornado-6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fba85b6cd9c39be262fcd23865652920832b61583de2a2ca907dbd8e8a8c81e5"},
- {file = "tornado-6.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:1e8225a1070cd8eec59a996c43229fe8f95689cb16e552d130b9793cb570a288"},
- {file = "tornado-6.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d14d30e7f46a0476efb0deb5b61343b1526f73ebb5ed84f23dc794bdb88f9d9f"},
- {file = "tornado-6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8f959b26f2634a091bb42241c3ed8d3cedb506e7c27b8dd5c7b9f745318ddbb6"},
- {file = "tornado-6.1-cp38-cp38-win32.whl", hash = "sha256:34ca2dac9e4d7afb0bed4677512e36a52f09caa6fded70b4e3e1c89dbd92c326"},
- {file = "tornado-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:6196a5c39286cc37c024cd78834fb9345e464525d8991c21e908cc046d1cc02c"},
- {file = "tornado-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0ba29bafd8e7e22920567ce0d232c26d4d47c8b5cf4ed7b562b5db39fa199c5"},
- {file = "tornado-6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:33892118b165401f291070100d6d09359ca74addda679b60390b09f8ef325ffe"},
- {file = "tornado-6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7da13da6f985aab7f6f28debab00c67ff9cbacd588e8477034c0652ac141feea"},
- {file = "tornado-6.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:e0791ac58d91ac58f694d8d2957884df8e4e2f6687cdf367ef7eb7497f79eaa2"},
- {file = "tornado-6.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:66324e4e1beede9ac79e60f88de548da58b1f8ab4b2f1354d8375774f997e6c0"},
- {file = "tornado-6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a48900ecea1cbb71b8c71c620dee15b62f85f7c14189bdeee54966fbd9a0c5bd"},
- {file = "tornado-6.1-cp39-cp39-win32.whl", hash = "sha256:d3d20ea5782ba63ed13bc2b8c291a053c8d807a8fa927d941bd718468f7b950c"},
- {file = "tornado-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4"},
- {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"},
+ {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"},
+ {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"},
+ {file = "tornado-6.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac"},
+ {file = "tornado-6.2-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75"},
+ {file = "tornado-6.2-cp37-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e"},
+ {file = "tornado-6.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8"},
+ {file = "tornado-6.2-cp37-abi3-musllinux_1_1_i686.whl", hash = "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b"},
+ {file = "tornado-6.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca"},
+ {file = "tornado-6.2-cp37-abi3-win32.whl", hash = "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23"},
+ {file = "tornado-6.2-cp37-abi3-win_amd64.whl", hash = "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b"},
+ {file = "tornado-6.2.tar.gz", hash = "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13"},
]
towncrier = [
- {file = "towncrier-21.9.0-py2.py3-none-any.whl", hash = "sha256:fc5a88a2a54988e3a8ed2b60d553599da8330f65722cc607c839614ed87e0f92"},
- {file = "towncrier-21.9.0.tar.gz", hash = "sha256:9cb6f45c16e1a1eec9d0e7651165e7be60cd0ab81d13a5c96ca97a498ae87f48"},
-]
-tqdm = [
- {file = "tqdm-4.63.0-py2.py3-none-any.whl", hash = "sha256:e643e071046f17139dea55b880dc9b33822ce21613b4a4f5ea57f202833dbc29"},
- {file = "tqdm-4.63.0.tar.gz", hash = "sha256:1d9835ede8e394bb8c9dcbffbca02d717217113adc679236873eeaac5bc0b3cd"},
+ {file = "towncrier-22.8.0-py2.py3-none-any.whl", hash = "sha256:3b780c3d966e1b26414830aec3d15000654b31e64e024f3e5fd128b4c6eb8f47"},
+ {file = "towncrier-22.8.0.tar.gz", hash = "sha256:7d3839b033859b45fb55df82b74cfd702431933c0cc9f287a5a7ea3e05d042cb"},
]
treq = [
{file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"},
{file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"},
]
twine = [
- {file = "twine-3.8.0-py3-none-any.whl", hash = "sha256:d0550fca9dc19f3d5e8eadfce0c227294df0a2a951251a4385797c8a6198b7c8"},
- {file = "twine-3.8.0.tar.gz", hash = "sha256:8efa52658e0ae770686a13b675569328f1fba9837e5de1867bfe5f46a9aefe19"},
+ {file = "twine-4.0.2-py3-none-any.whl", hash = "sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8"},
+ {file = "twine-4.0.2.tar.gz", hash = "sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8"},
]
-twisted = [
- {file = "Twisted-22.4.0-py3-none-any.whl", hash = "sha256:f9f7a91f94932477a9fc3b169d57f54f96c6e74a23d78d9ce54039a7f48928a2"},
- {file = "Twisted-22.4.0.tar.gz", hash = "sha256:a047990f57dfae1e0bd2b7df2526d4f16dcdc843774dc108b78c52f2a5f13680"},
+Twisted = [
+ {file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"},
+ {file = "Twisted-22.10.0.tar.gz", hash = "sha256:32acbd40a94f5f46e7b42c109bfae2b302250945561783a8b7a059048f2d4d31"},
]
twisted-iocpsupport = [
{file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"},
@@ -2743,98 +2833,90 @@ txredisapi = [
{file = "txredisapi-1.4.7.tar.gz", hash = "sha256:e6cc43f51e35d608abdca8f8c7d20e148fe1d82679f6e584baea613ebec812bb"},
]
typed-ast = [
- {file = "typed_ast-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:183b183b7771a508395d2cbffd6db67d6ad52958a5fdc99f450d954003900266"},
- {file = "typed_ast-1.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:676d051b1da67a852c0447621fdd11c4e104827417bf216092ec3e286f7da596"},
- {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc2542e83ac8399752bc16e0b35e038bdb659ba237f4222616b4e83fb9654985"},
- {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74cac86cc586db8dfda0ce65d8bcd2bf17b58668dfcc3652762f3ef0e6677e76"},
- {file = "typed_ast-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:18fe320f354d6f9ad3147859b6e16649a0781425268c4dde596093177660e71a"},
- {file = "typed_ast-1.5.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:31d8c6b2df19a777bc8826770b872a45a1f30cfefcfd729491baa5237faae837"},
- {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:963a0ccc9a4188524e6e6d39b12c9ca24cc2d45a71cfdd04a26d883c922b4b78"},
- {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0eb77764ea470f14fcbb89d51bc6bbf5e7623446ac4ed06cbd9ca9495b62e36e"},
- {file = "typed_ast-1.5.2-cp36-cp36m-win_amd64.whl", hash = "sha256:294a6903a4d087db805a7656989f613371915fc45c8cc0ddc5c5a0a8ad9bea4d"},
- {file = "typed_ast-1.5.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:26a432dc219c6b6f38be20a958cbe1abffcc5492821d7e27f08606ef99e0dffd"},
- {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7407cfcad702f0b6c0e0f3e7ab876cd1d2c13b14ce770e412c0c4b9728a0f88"},
- {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f30ddd110634c2d7534b2d4e0e22967e88366b0d356b24de87419cc4410c41b7"},
- {file = "typed_ast-1.5.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8c08d6625bb258179b6e512f55ad20f9dfef019bbfbe3095247401e053a3ea30"},
- {file = "typed_ast-1.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:90904d889ab8e81a956f2c0935a523cc4e077c7847a836abee832f868d5c26a4"},
- {file = "typed_ast-1.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bbebc31bf11762b63bf61aaae232becb41c5bf6b3461b80a4df7e791fabb3aca"},
- {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29dd9a3a9d259c9fa19d19738d021632d673f6ed9b35a739f48e5f807f264fb"},
- {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:58ae097a325e9bb7a684572d20eb3e1809802c5c9ec7108e85da1eb6c1a3331b"},
- {file = "typed_ast-1.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:da0a98d458010bf4fe535f2d1e367a2e2060e105978873c04c04212fb20543f7"},
- {file = "typed_ast-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:33b4a19ddc9fc551ebabca9765d54d04600c4a50eda13893dadf67ed81d9a098"},
- {file = "typed_ast-1.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1098df9a0592dd4c8c0ccfc2e98931278a6c6c53cb3a3e2cf7e9ee3b06153344"},
- {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c47c3b43fe3a39ddf8de1d40dbbfca60ac8530a36c9b198ea5b9efac75c09e"},
- {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f290617f74a610849bd8f5514e34ae3d09eafd521dceaa6cf68b3f4414266d4e"},
- {file = "typed_ast-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:df05aa5b241e2e8045f5f4367a9f6187b09c4cdf8578bb219861c4e27c443db5"},
- {file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"},
+ {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"},
+ {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"},
+ {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"},
+ {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"},
+ {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"},
+ {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"},
+ {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"},
+ {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"},
+ {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"},
+ {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"},
+ {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"},
+ {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"},
+ {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"},
+ {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"},
+ {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"},
+ {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"},
+ {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"},
+ {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"},
+ {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"},
+ {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"},
+ {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"},
+ {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"},
+ {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"},
+ {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"},
]
types-bleach = [
- {file = "types-bleach-4.1.4.tar.gz", hash = "sha256:2d30c2c4fb6854088ac636471352c9a51bf6c089289800d2a8060820a01cd43a"},
- {file = "types_bleach-4.1.4-py3-none-any.whl", hash = "sha256:edffe173ed6d7b6f3543036a96204a9319c3bf6c3645917b14274e43f000cc9b"},
+ {file = "types-bleach-5.0.3.1.tar.gz", hash = "sha256:ce8772ea5126dab1883851b41e3aeff229aa5213ced36096990344e632e92373"},
+ {file = "types_bleach-5.0.3.1-py3-none-any.whl", hash = "sha256:af5f1b3a54ff279f54c29eccb2e6988ebb6718bc4061469588a5fd4880a79287"},
]
types-commonmark = [
{file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"},
{file = "types_commonmark-0.9.2-py3-none-any.whl", hash = "sha256:56f20199a1f9a2924443211a0ef97f8b15a8a956a7f4e9186be6950bf38d6d02"},
]
types-cryptography = [
- {file = "types-cryptography-3.3.15.tar.gz", hash = "sha256:a7983a75a7b88a18f88832008f0ef140b8d1097888ec1a0824ec8fb7e105273b"},
- {file = "types_cryptography-3.3.15-py3-none-any.whl", hash = "sha256:d9b0dd5465d7898d400850e7f35e5518aa93a7e23d3e11757cd81b4777089046"},
-]
-types-enum34 = [
- {file = "types-enum34-1.1.8.tar.gz", hash = "sha256:6f9c769641d06d73a55e11c14d38ac76fcd37eb545ce79cebb6eec9d50a64110"},
- {file = "types_enum34-1.1.8-py3-none-any.whl", hash = "sha256:05058c7a495f6bfaaca0be4aeac3cce5cdd80a2bad2aab01fd49a20bf4a0209d"},
-]
-types-ipaddress = [
- {file = "types-ipaddress-1.0.8.tar.gz", hash = "sha256:a03df3be5935e50ba03fa843daabff539a041a28e73e0fce2c5705bee54d3841"},
- {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"},
+ {file = "types-cryptography-3.3.23.2.tar.gz", hash = "sha256:09cc53f273dd4d8c29fa7ad11fefd9b734126d467960162397bc5e3e604dea75"},
+ {file = "types_cryptography-3.3.23.2-py3-none-any.whl", hash = "sha256:b965d548f148f8e87f353ccf2b7bd92719fdf6c845ff7cedf2abb393a0643e4f"},
]
types-jsonschema = [
- {file = "types-jsonschema-4.4.6.tar.gz", hash = "sha256:7f2a804618756768c7c0616f8c794b61fcfe3077c7ee1ad47dcf01c5e5f692bb"},
- {file = "types_jsonschema-4.4.6-py3-none-any.whl", hash = "sha256:1db9031ca49a8444d01bd2ce8cf2f89318382b04610953b108321e6f8fb03390"},
+ {file = "types-jsonschema-4.17.0.2.tar.gz", hash = "sha256:8b9e1140d4d780f0f19b5cab1b8a3732e8dd5e49dbc1f174cc0b499125ca6f6c"},
+ {file = "types_jsonschema-4.17.0.2-py3-none-any.whl", hash = "sha256:8fd2f9aea4da54f9a811baa6963aac10fd680c18baa6237392c079b97d152738"},
]
types-opentracing = [
- {file = "types-opentracing-2.4.7.tar.gz", hash = "sha256:be60e9618355aa892571ace002e6b353702538b1c0dc4fbc1c921219d6658830"},
- {file = "types_opentracing-2.4.7-py3-none-any.whl", hash = "sha256:861fb8103b07cf717f501dd400cb274ca9992552314d4d6c7a824b11a215e512"},
+ {file = "types-opentracing-2.4.10.tar.gz", hash = "sha256:6101414f3b6d3b9c10f1c510a261e8439b6c8d67c723d5c2872084697b4580a7"},
+ {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"},
]
-types-pillow = [
- {file = "types-Pillow-9.0.15.tar.gz", hash = "sha256:d2e385fe5c192e75970f18accce69f5c2a9f186f3feb578a9b91cd6fdf64211d"},
- {file = "types_Pillow-9.0.15-py3-none-any.whl", hash = "sha256:c9646595dfafdf8b63d4b1443292ead17ee0fc7b18a143e497b68e0ea2dc1eb6"},
+types-Pillow = [
+ {file = "types-Pillow-9.3.0.4.tar.gz", hash = "sha256:c18d466dc18550d96b8b4a279ff94f0cbad696825b5ad55466604f1daf5709de"},
+ {file = "types_Pillow-9.3.0.4-py3-none-any.whl", hash = "sha256:98b8484ff343676f6f7051682a6cfd26896e993e86b3ce9badfa0ec8750f5405"},
]
types-psycopg2 = [
- {file = "types-psycopg2-2.9.9.tar.gz", hash = "sha256:4f9d4d52eeb343dc00fd5ed4f1513a8a5c18efba0a072eb82706d15cf4f20a2e"},
- {file = "types_psycopg2-2.9.9-py3-none-any.whl", hash = "sha256:cec9291d4318ad70b407310f8304b3d40f6d0358f09870448f7a65e3027c80af"},
+ {file = "types-psycopg2-2.9.21.2.tar.gz", hash = "sha256:bff045579642ce00b4a3c8f2e401b7f96dfaa34939f10be64b0dd3b53feca57d"},
+ {file = "types_psycopg2-2.9.21.2-py3-none-any.whl", hash = "sha256:084558d6bc4b2cfa249b06be0fdd9a14a69d307bae5bb5809a2f14cfbaa7a23f"},
]
types-pyOpenSSL = [
- {file = "types-pyOpenSSL-22.0.10.tar.gz", hash = "sha256:f943b834f5b97e5e808764c2f6e37be1a2e226c46792296f61558196acfcc3a1"},
- {file = "types_pyOpenSSL-22.0.10-py3-none-any.whl", hash = "sha256:63baea211768bea580a769ac5c0d637ae8cd3150314aadc5726ca22e4c4f241a"},
+ {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"},
+ {file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"},
]
types-PyYAML = [
- {file = "types-PyYAML-6.0.12.tar.gz", hash = "sha256:f6f350418125872f3f0409d96a62a5a5ceb45231af5cc07ee0034ec48a3c82fa"},
- {file = "types_PyYAML-6.0.12-py3-none-any.whl", hash = "sha256:29228db9f82df4f1b7febee06bbfb601677882e98a3da98132e31c6874163e15"},
+ {file = "types-PyYAML-6.0.12.2.tar.gz", hash = "sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"},
+ {file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"},
]
types-requests = [
- {file = "types-requests-2.28.11.tar.gz", hash = "sha256:7ee827eb8ce611b02b5117cfec5da6455365b6a575f5e3ff19f655ba603e6b4e"},
- {file = "types_requests-2.28.11-py3-none-any.whl", hash = "sha256:af5f55e803cabcfb836dad752bd6d8a0fc8ef1cd84243061c0e27dee04ccf4fd"},
+ {file = "types-requests-2.28.11.5.tar.gz", hash = "sha256:a7df37cc6fb6187a84097da951f8e21d335448aa2501a6b0a39cbd1d7ca9ee2a"},
+ {file = "types_requests-2.28.11.5-py3-none-any.whl", hash = "sha256:091d4a5a33c1b4f20d8b1b952aa8fa27a6e767c44c3cf65e56580df0b05fd8a9"},
]
types-setuptools = [
- {file = "types-setuptools-65.4.0.0.tar.gz", hash = "sha256:d9021d6a70690b34e7bd2947e7ab10167c646fbf062508cb56581be2e2a1615e"},
- {file = "types_setuptools-65.4.0.0-py3-none-any.whl", hash = "sha256:ce178b3f7dbd6c0e67f8eee7ae29c1be280ade7e5188bdd9e620843de4060d85"},
+ {file = "types-setuptools-65.6.0.2.tar.gz", hash = "sha256:ad60ccf01d626de9762224448f36c13e0660e863afd6dc11d979b3739a6c7d24"},
+ {file = "types_setuptools-65.6.0.2-py3-none-any.whl", hash = "sha256:2c2b4f756f79778074ce2d21f745aa737b12160d9f8dfa274f47a7287c7a2fee"},
]
types-urllib3 = [
- {file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"},
- {file = "types_urllib3-1.26.10-py3-none-any.whl", hash = "sha256:d755278d5ecd7a7a6479a190e54230f241f1a99c19b81518b756b19dc69e518c"},
+ {file = "types-urllib3-1.26.25.4.tar.gz", hash = "sha256:eec5556428eec862b1ac578fb69aab3877995a99ffec9e5a12cf7fbd0cc9daee"},
+ {file = "types_urllib3-1.26.25.4-py3-none-any.whl", hash = "sha256:ed6b9e8a8be488796f72306889a06a3fc3cb1aa99af02ab8afb50144d7317e49"},
]
typing-extensions = [
- {file = "typing_extensions-4.1.1-py3-none-any.whl", hash = "sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2"},
- {file = "typing_extensions-4.1.1.tar.gz", hash = "sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42"},
+ {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"},
+ {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"},
]
unpaddedbase64 = [
{file = "unpaddedbase64-2.1.0-py3-none-any.whl", hash = "sha256:485eff129c30175d2cd6f0cd8d2310dff51e666f7f36175f738d75dfdbd0b1c6"},
{file = "unpaddedbase64-2.1.0.tar.gz", hash = "sha256:7273c60c089de39d90f5d6d4a7883a79e319dc9d9b1c8924a7fab96178a5f005"},
]
urllib3 = [
- {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"},
- {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"},
+ {file = "urllib3-1.26.13-py2.py3-none-any.whl", hash = "sha256:47cc05d99aaa09c9e72ed5809b60e7ba354e64b59c9c173ac3018642d8bb41fc"},
+ {file = "urllib3-1.26.13.tar.gz", hash = "sha256:c083dd0dce68dbfbe1129d5271cb90f9447dea7d52097c6e0126120c521ddea8"},
]
uvloop = [
{file = "uvloop-0.17.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ce9f61938d7155f79d3cb2ffa663147d4a76d16e08f65e2c66b77bd41b356718"},
@@ -2873,124 +2955,122 @@ webencodings = [
{file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
]
wrapt = [
- {file = "wrapt-1.13.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229"},
- {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80"},
- {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca"},
- {file = "wrapt-1.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44"},
- {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056"},
- {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785"},
- {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096"},
- {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33"},
- {file = "wrapt-1.13.3-cp310-cp310-win32.whl", hash = "sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f"},
- {file = "wrapt-1.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3"},
- {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755"},
- {file = "wrapt-1.13.3-cp35-cp35m-win32.whl", hash = "sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851"},
- {file = "wrapt-1.13.3-cp35-cp35m-win_amd64.whl", hash = "sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13"},
- {file = "wrapt-1.13.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918"},
- {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade"},
- {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc"},
- {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf"},
- {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125"},
- {file = "wrapt-1.13.3-cp36-cp36m-win32.whl", hash = "sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36"},
- {file = "wrapt-1.13.3-cp36-cp36m-win_amd64.whl", hash = "sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10"},
- {file = "wrapt-1.13.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068"},
- {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709"},
- {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df"},
- {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2"},
- {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b"},
- {file = "wrapt-1.13.3-cp37-cp37m-win32.whl", hash = "sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829"},
- {file = "wrapt-1.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea"},
- {file = "wrapt-1.13.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9"},
- {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554"},
- {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c"},
- {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b"},
- {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce"},
- {file = "wrapt-1.13.3-cp38-cp38-win32.whl", hash = "sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79"},
- {file = "wrapt-1.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb"},
- {file = "wrapt-1.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb"},
- {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32"},
- {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7"},
- {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e"},
- {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640"},
- {file = "wrapt-1.13.3-cp39-cp39-win32.whl", hash = "sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374"},
- {file = "wrapt-1.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb"},
- {file = "wrapt-1.13.3.tar.gz", hash = "sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185"},
+ {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"},
+ {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"},
+ {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"},
+ {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"},
+ {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"},
+ {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"},
+ {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"},
+ {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"},
+ {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"},
+ {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"},
+ {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"},
+ {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"},
+ {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"},
+ {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"},
+ {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"},
+ {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"},
+ {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"},
+ {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"},
+ {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"},
+ {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"},
+ {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"},
+ {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"},
]
xmlschema = [
- {file = "xmlschema-1.10.0-py3-none-any.whl", hash = "sha256:dbd68bded2fef00c19cf37110ca0565eca34cf0b6c9e1d3b62ad0de8cbb582ca"},
- {file = "xmlschema-1.10.0.tar.gz", hash = "sha256:be1eedce6a4b911fd3a7f4060d0811951820a13410e61f0454b30e9f4e7cf197"},
+ {file = "xmlschema-2.1.1-py3-none-any.whl", hash = "sha256:5717a8a239637a9ad7d7563ce676dddf0a8989644c833f96bfc6d157c3cb3750"},
+ {file = "xmlschema-2.1.1.tar.gz", hash = "sha256:5ca34ff15dd3276cfb2e3e7b4c8dde4b7d4d27080f333a93b6c3f817e90abddf"},
]
zipp = [
- {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"},
- {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"},
+ {file = "zipp-3.11.0-py3-none-any.whl", hash = "sha256:83a28fcb75844b5c0cdaf5aa4003c2d728c77e05f5aeabe8e95e56727005fbaa"},
+ {file = "zipp-3.11.0.tar.gz", hash = "sha256:a7a22e05929290a67401440b39690ae6563279bced5f314609d9d03798f56766"},
]
"zope.event" = [
{file = "zope.event-4.5.0-py2.py3-none-any.whl", hash = "sha256:2666401939cdaa5f4e0c08cf7f20c9b21423b95e88f4675b1443973bdb080c42"},
{file = "zope.event-4.5.0.tar.gz", hash = "sha256:5e76517f5b9b119acf37ca8819781db6c16ea433f7e2062c4afc2b6fbedb1330"},
]
"zope.interface" = [
- {file = "zope.interface-5.4.0-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:7df1e1c05304f26faa49fa752a8c690126cf98b40b91d54e6e9cc3b7d6ffe8b7"},
- {file = "zope.interface-5.4.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:2c98384b254b37ce50eddd55db8d381a5c53b4c10ee66e1e7fe749824f894021"},
- {file = "zope.interface-5.4.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:08f9636e99a9d5410181ba0729e0408d3d8748026ea938f3b970a0249daa8192"},
- {file = "zope.interface-5.4.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0ea1d73b7c9dcbc5080bb8aaffb776f1c68e807767069b9ccdd06f27a161914a"},
- {file = "zope.interface-5.4.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:273f158fabc5ea33cbc936da0ab3d4ba80ede5351babc4f577d768e057651531"},
- {file = "zope.interface-5.4.0-cp27-cp27m-win32.whl", hash = "sha256:a1e6e96217a0f72e2b8629e271e1b280c6fa3fe6e59fa8f6701bec14e3354325"},
- {file = "zope.interface-5.4.0-cp27-cp27m-win_amd64.whl", hash = "sha256:877473e675fdcc113c138813a5dd440da0769a2d81f4d86614e5d62b69497155"},
- {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f7ee479e96f7ee350db1cf24afa5685a5899e2b34992fb99e1f7c1b0b758d263"},
- {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:b0297b1e05fd128d26cc2460c810d42e205d16d76799526dfa8c8ccd50e74959"},
- {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:af310ec8335016b5e52cae60cda4a4f2a60a788cbb949a4fbea13d441aa5a09e"},
- {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:9a9845c4c6bb56e508651f005c4aeb0404e518c6f000d5a1123ab077ab769f5c"},
- {file = "zope.interface-5.4.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0b465ae0962d49c68aa9733ba92a001b2a0933c317780435f00be7ecb959c702"},
- {file = "zope.interface-5.4.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:5dd9ca406499444f4c8299f803d4a14edf7890ecc595c8b1c7115c2342cadc5f"},
- {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:469e2407e0fe9880ac690a3666f03eb4c3c444411a5a5fddfdabc5d184a79f05"},
- {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:52de7fc6c21b419078008f697fd4103dbc763288b1406b4562554bd47514c004"},
- {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:3dd4952748521205697bc2802e4afac5ed4b02909bb799ba1fe239f77fd4e117"},
- {file = "zope.interface-5.4.0-cp35-cp35m-win32.whl", hash = "sha256:dd93ea5c0c7f3e25335ab7d22a507b1dc43976e1345508f845efc573d3d779d8"},
- {file = "zope.interface-5.4.0-cp35-cp35m-win_amd64.whl", hash = "sha256:3748fac0d0f6a304e674955ab1365d515993b3a0a865e16a11ec9d86fb307f63"},
- {file = "zope.interface-5.4.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:66c0061c91b3b9cf542131148ef7ecbecb2690d48d1612ec386de9d36766058f"},
- {file = "zope.interface-5.4.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:d0c1bc2fa9a7285719e5678584f6b92572a5b639d0e471bb8d4b650a1a910920"},
- {file = "zope.interface-5.4.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2876246527c91e101184f63ccd1d716ec9c46519cc5f3d5375a3351c46467c46"},
- {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:334701327f37c47fa628fc8b8d28c7d7730ce7daaf4bda1efb741679c2b087fc"},
- {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:71aace0c42d53abe6fc7f726c5d3b60d90f3c5c055a447950ad6ea9cec2e37d9"},
- {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:5bb3489b4558e49ad2c5118137cfeaf59434f9737fa9c5deefc72d22c23822e2"},
- {file = "zope.interface-5.4.0-cp36-cp36m-win32.whl", hash = "sha256:1c0e316c9add0db48a5b703833881351444398b04111188069a26a61cfb4df78"},
- {file = "zope.interface-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f0c02cbb9691b7c91d5009108f975f8ffeab5dff8f26d62e21c493060eff2a1"},
- {file = "zope.interface-5.4.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:7d97a4306898b05404a0dcdc32d9709b7d8832c0c542b861d9a826301719794e"},
- {file = "zope.interface-5.4.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:867a5ad16892bf20e6c4ea2aab1971f45645ff3102ad29bd84c86027fa99997b"},
- {file = "zope.interface-5.4.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5f931a1c21dfa7a9c573ec1f50a31135ccce84e32507c54e1ea404894c5eb96f"},
- {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:194d0bcb1374ac3e1e023961610dc8f2c78a0f5f634d0c737691e215569e640d"},
- {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:8270252effc60b9642b423189a2fe90eb6b59e87cbee54549db3f5562ff8d1b8"},
- {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:15e7d1f7a6ee16572e21e3576d2012b2778cbacf75eb4b7400be37455f5ca8bf"},
- {file = "zope.interface-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:8892f89999ffd992208754851e5a052f6b5db70a1e3f7d54b17c5211e37a98c7"},
- {file = "zope.interface-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2e5a26f16503be6c826abca904e45f1a44ff275fdb7e9d1b75c10671c26f8b94"},
- {file = "zope.interface-5.4.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:0f91b5b948686659a8e28b728ff5e74b1be6bf40cb04704453617e5f1e945ef3"},
- {file = "zope.interface-5.4.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:4de4bc9b6d35c5af65b454d3e9bc98c50eb3960d5a3762c9438df57427134b8e"},
- {file = "zope.interface-5.4.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:bf68f4b2b6683e52bec69273562df15af352e5ed25d1b6641e7efddc5951d1a7"},
- {file = "zope.interface-5.4.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:63b82bb63de7c821428d513607e84c6d97d58afd1fe2eb645030bdc185440120"},
- {file = "zope.interface-5.4.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:db1fa631737dab9fa0b37f3979d8d2631e348c3b4e8325d6873c2541d0ae5a48"},
- {file = "zope.interface-5.4.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f44e517131a98f7a76696a7b21b164bcb85291cee106a23beccce454e1f433a4"},
- {file = "zope.interface-5.4.0-cp38-cp38-win32.whl", hash = "sha256:a9506a7e80bcf6eacfff7f804c0ad5350c8c95b9010e4356a4b36f5322f09abb"},
- {file = "zope.interface-5.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:3c02411a3b62668200910090a0dff17c0b25aaa36145082a5a6adf08fa281e54"},
- {file = "zope.interface-5.4.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:0cee5187b60ed26d56eb2960136288ce91bcf61e2a9405660d271d1f122a69a4"},
- {file = "zope.interface-5.4.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:a8156e6a7f5e2a0ff0c5b21d6bcb45145efece1909efcbbbf48c56f8da68221d"},
- {file = "zope.interface-5.4.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:205e40ccde0f37496904572035deea747390a8b7dc65146d30b96e2dd1359a83"},
- {file = "zope.interface-5.4.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:3f24df7124c323fceb53ff6168da70dbfbae1442b4f3da439cd441681f54fe25"},
- {file = "zope.interface-5.4.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:5208ebd5152e040640518a77827bdfcc73773a15a33d6644015b763b9c9febc1"},
- {file = "zope.interface-5.4.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:17776ecd3a1fdd2b2cd5373e5ef8b307162f581c693575ec62e7c5399d80794c"},
- {file = "zope.interface-5.4.0-cp39-cp39-win32.whl", hash = "sha256:d4d9d6c1a455d4babd320203b918ccc7fcbefe308615c521062bc2ba1aa4d26e"},
- {file = "zope.interface-5.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:0cba8477e300d64a11a9789ed40ee8932b59f9ee05f85276dbb4b59acee5dd09"},
- {file = "zope.interface-5.4.0.tar.gz", hash = "sha256:5dba5f530fec3f0988d83b78cc591b58c0b6eb8431a85edd1569a0539a8a5a0e"},
+ {file = "zope.interface-5.5.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:a2ad597c8c9e038a5912ac3cf166f82926feff2f6e0dabdab956768de0a258f5"},
+ {file = "zope.interface-5.5.2-cp27-cp27m-win_amd64.whl", hash = "sha256:65c3c06afee96c654e590e046c4a24559e65b0a87dbff256cd4bd6f77e1a33f9"},
+ {file = "zope.interface-5.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d514c269d1f9f5cd05ddfed15298d6c418129f3f064765295659798349c43e6f"},
+ {file = "zope.interface-5.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5334e2ef60d3d9439c08baedaf8b84dc9bb9522d0dacbc10572ef5609ef8db6d"},
+ {file = "zope.interface-5.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc26c8d44472e035d59d6f1177eb712888447f5799743da9c398b0339ed90b1b"},
+ {file = "zope.interface-5.5.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:17ebf6e0b1d07ed009738016abf0d0a0f80388e009d0ac6e0ead26fc162b3b9c"},
+ {file = "zope.interface-5.5.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f98d4bd7bbb15ca701d19b93263cc5edfd480c3475d163f137385f49e5b3a3a7"},
+ {file = "zope.interface-5.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:696f3d5493eae7359887da55c2afa05acc3db5fc625c49529e84bd9992313296"},
+ {file = "zope.interface-5.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7579960be23d1fddecb53898035a0d112ac858c3554018ce615cefc03024e46d"},
+ {file = "zope.interface-5.5.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:765d703096ca47aa5d93044bf701b00bbce4d903a95b41fff7c3796e747b1f1d"},
+ {file = "zope.interface-5.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e945de62917acbf853ab968d8916290548df18dd62c739d862f359ecd25842a6"},
+ {file = "zope.interface-5.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:655796a906fa3ca67273011c9805c1e1baa047781fca80feeb710328cdbed87f"},
+ {file = "zope.interface-5.5.2-cp35-cp35m-win_amd64.whl", hash = "sha256:0fb497c6b088818e3395e302e426850f8236d8d9f4ef5b2836feae812a8f699c"},
+ {file = "zope.interface-5.5.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:008b0b65c05993bb08912f644d140530e775cf1c62a072bf9340c2249e613c32"},
+ {file = "zope.interface-5.5.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:404d1e284eda9e233c90128697c71acffd55e183d70628aa0bbb0e7a3084ed8b"},
+ {file = "zope.interface-5.5.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:3218ab1a7748327e08ef83cca63eea7cf20ea7e2ebcb2522072896e5e2fceedf"},
+ {file = "zope.interface-5.5.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d169ccd0756c15bbb2f1acc012f5aab279dffc334d733ca0d9362c5beaebe88e"},
+ {file = "zope.interface-5.5.2-cp36-cp36m-win_amd64.whl", hash = "sha256:e1574980b48c8c74f83578d1e77e701f8439a5d93f36a5a0af31337467c08fcf"},
+ {file = "zope.interface-5.5.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:0217a9615531c83aeedb12e126611b1b1a3175013bbafe57c702ce40000eb9a0"},
+ {file = "zope.interface-5.5.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:311196634bb9333aa06f00fc94f59d3a9fddd2305c2c425d86e406ddc6f2260d"},
+ {file = "zope.interface-5.5.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6373d7eb813a143cb7795d3e42bd8ed857c82a90571567e681e1b3841a390d16"},
+ {file = "zope.interface-5.5.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:959697ef2757406bff71467a09d940ca364e724c534efbf3786e86eee8591452"},
+ {file = "zope.interface-5.5.2-cp37-cp37m-win_amd64.whl", hash = "sha256:dbaeb9cf0ea0b3bc4b36fae54a016933d64c6d52a94810a63c00f440ecb37dd7"},
+ {file = "zope.interface-5.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604cdba8f1983d0ab78edc29aa71c8df0ada06fb147cea436dc37093a0100a4e"},
+ {file = "zope.interface-5.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e74a578172525c20d7223eac5f8ad187f10940dac06e40113d62f14f3adb1e8f"},
+ {file = "zope.interface-5.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0980d44b8aded808bec5059018d64692f0127f10510eca71f2f0ace8fb11188"},
+ {file = "zope.interface-5.5.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6e972493cdfe4ad0411fd9abfab7d4d800a7317a93928217f1a5de2bb0f0d87a"},
+ {file = "zope.interface-5.5.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9d783213fab61832dbb10d385a319cb0e45451088abd45f95b5bb88ed0acca1a"},
+ {file = "zope.interface-5.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:a16025df73d24795a0bde05504911d306307c24a64187752685ff6ea23897cb0"},
+ {file = "zope.interface-5.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:40f4065745e2c2fa0dff0e7ccd7c166a8ac9748974f960cd39f63d2c19f9231f"},
+ {file = "zope.interface-5.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8a2ffadefd0e7206adc86e492ccc60395f7edb5680adedf17a7ee4205c530df4"},
+ {file = "zope.interface-5.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d692374b578360d36568dd05efb8a5a67ab6d1878c29c582e37ddba80e66c396"},
+ {file = "zope.interface-5.5.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4087e253bd3bbbc3e615ecd0b6dd03c4e6a1e46d152d3be6d2ad08fbad742dcc"},
+ {file = "zope.interface-5.5.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fb68d212efd057596dee9e6582daded9f8ef776538afdf5feceb3059df2d2e7b"},
+ {file = "zope.interface-5.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:7e66f60b0067a10dd289b29dceabd3d0e6d68be1504fc9d0bc209cf07f56d189"},
+ {file = "zope.interface-5.5.2.tar.gz", hash = "sha256:bfee1f3ff62143819499e348f5b8a7f3aa0259f9aca5e0ddae7391d059dce671"},
]
"zope.schema" = [
- {file = "zope.schema-6.2.0-py2.py3-none-any.whl", hash = "sha256:03150d8670549590b45109e06b7b964f4e751fa9cb5297ec4985c3bc38641b07"},
- {file = "zope.schema-6.2.0.tar.gz", hash = "sha256:2201aef8ad75ee5a881284d7a6acd384661d6dca7bde5e80a22839a77124595b"},
+ {file = "zope.schema-6.2.1-py2.py3-none-any.whl", hash = "sha256:843c6fce13886333f707246f135a832f4408555ca9650f07ed6e4d429302f349"},
+ {file = "zope.schema-6.2.1.tar.gz", hash = "sha256:e3b33c8bc8ba9d85a56713ab8f0a3c2615d54a8085f1e415ce0c8dfb5e540be6"},
]
diff --git a/pyproject.toml b/pyproject.toml
index 7634af8ba866..fe41c811643c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
-version = "1.69.0rc2"
+version = "1.74.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors "]
license = "Apache-2.0"
@@ -141,7 +141,8 @@ pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
Pillow = ">=5.4.0"
-sortedcontainers = ">=1.4.4"
+# We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
+sortedcontainers = ">=1.5.2"
pymacaroons = ">=0.13.0"
msgpack = ">=0.5.2"
phonenumbers = ">=8.2.0"
@@ -192,7 +193,7 @@ psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'Py
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
-authlib = { version = ">=0.14.0", optional = true }
+authlib = { version = ">=0.15.1", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
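As a hedged illustration of the journal logging that `systemd-python` enables (the handler class is real; the identifier value is made up rather than taken from `contrib/systemd/log_config.yaml`):

```python
import logging

from systemd import journal

# JournalHandler forwards log records to the systemd journal; extra journal
# fields such as SYSLOG_IDENTIFIER can be passed as keyword arguments.
handler = journal.JournalHandler(SYSLOG_IDENTIFIER="synapse")
logging.getLogger("synapse").addHandler(handler)
logging.getLogger("synapse").warning("now visible via `journalctl -t synapse`")
```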
@@ -207,6 +208,7 @@ hiredis = { version = "*", optional = true }
Pympler = { version = "*", optional = true }
parameterized = { version = ">=0.7.4", optional = true }
idna = { version = ">=2.5", optional = true }
+pyicu = { version = ">=2.10.2", optional = true }
uvloop = { version = ">=0.17.0", optional = true }
[tool.poetry.extras]
@@ -220,7 +222,7 @@ oidc = ["authlib"]
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
-url_preview = ["lxml"]
+url-preview = ["lxml"]
sentry = ["sentry-sdk"]
opentracing = ["jaeger-client", "opentracing"]
jwt = ["authlib"]
@@ -228,8 +230,12 @@ jwt = ["authlib"]
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
-cache_memory = ["pympler"]
+cache-memory = ["pympler"]
test = ["parameterized", "idna"]
+# Allows for better search for international characters in the user directory. This
+# requires libicu's development headers to be installed on the system (e.g. libicu-dev
+# on Debian-based distributions); see the sketch below.
+user-search = ["pyicu"]
uvloop = ["uvloop"]
# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
@@ -252,7 +258,7 @@ all = [
"pysaml2",
# oidc and jwt
"authlib",
- # url_preview
+ # url-preview
"lxml",
# sentry
"sentry-sdk",
@@ -260,8 +266,10 @@ all = [
"jaeger-client", "opentracing",
# redis
"txredisapi", "hiredis",
- # cache_memory
+ # cache-memory
"pympler",
+ # improved user search
+ "pyicu",
# uvloop
"uvloop",
# omitted:
@@ -271,10 +279,10 @@ all = [
[tool.poetry.dev-dependencies]
## We pin black so that our tests don't start failing on new releases.
-isort = "==5.10.1"
-black = "==22.3.0"
+isort = ">=5.10.1"
+black = ">=22.3.0"
flake8-comprehensions = "*"
-flake8-bugbear = "==21.3.2"
+flake8-bugbear = ">=21.3.2"
flake8 = "*"
# Typechecking
@@ -300,27 +308,32 @@ parameterized = ">=0.7.4"
idna = ">=2.5"
# The following are used by the release script
-click = "==8.1.1"
+click = ">=8.1.3"
# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
GitPython = ">=3.1.20"
-commonmark = "==0.9.1"
-pygithub = "==1.55"
+commonmark = ">=0.9.1"
+pygithub = ">=1.55"
# The following are executed as commands by the release script.
twine = "*"
# Towncrier min version comes from #3425. Rationale unclear.
towncrier = ">=18.6.0rc1"
[build-system]
-requires = ["poetry-core==1.2.0", "setuptools_rust==1.5.2"]
+# The upper bounds here are defensive, intended to prevent situations like
+# #13849 and #14079 where we see buildtime or runtime errors caused by build
+# system changes.
+# We are happy to raise these upper bounds upon request,
+# provided we check that it's safe to do so (i.e. that CI passes).
+requires = ["poetry-core>=1.0.0,<=1.3.2", "setuptools_rust>=1.3,<=1.5.2"]
build-backend = "poetry.core.masonry.api"
[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
-skip = "cp36* *-musllinux_i686"
+skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# We need a rust compiler
-before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y"
+before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
environment= { PATH = "$PATH:$HOME/.cargo/bin" }
# For some reason if we don't manually clean the build directory we
@@ -329,3 +342,12 @@ environment= { PATH = "$PATH:$HOME/.cargo/bin" }
before-build = "rm -rf {project}/build"
build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"
+
+
+[tool.cibuildwheel.linux]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"
+
+[tool.cibuildwheel.macos]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}"
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs
index ed411461d139..442a79348fcf 100644
--- a/rust/benches/evaluator.rs
+++ b/rust/benches/evaluator.rs
@@ -33,10 +33,12 @@ fn bench_match_exact(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
- 0,
+ Some(0),
Default::default(),
Default::default(),
true,
+ vec![],
+ false,
)
.unwrap();
@@ -67,10 +69,12 @@ fn bench_match_word(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
- 0,
+ Some(0),
Default::default(),
Default::default(),
true,
+ vec![],
+ false,
)
.unwrap();
@@ -101,10 +105,12 @@ fn bench_match_word_miss(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
- 0,
+ Some(0),
Default::default(),
Default::default(),
true,
+ vec![],
+ false,
)
.unwrap();
@@ -135,10 +141,12 @@ fn bench_eval_message(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
- 0,
+ Some(0),
Default::default(),
Default::default(),
true,
+ vec![],
+ false,
)
.unwrap();
diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs
index 2a09cf99ae13..35129691ca43 100644
--- a/rust/src/push/base_rules.rs
+++ b/rust/src/push/base_rules.rs
@@ -25,6 +25,7 @@ use crate::push::Action;
use crate::push::Condition;
use crate::push::EventMatchCondition;
use crate::push::PushRule;
+use crate::push::RelatedEventMatchCondition;
use crate::push::SetTweak;
use crate::push::TweakValue;
@@ -114,6 +115,22 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch(
+ RelatedEventMatchCondition {
+ key: Some(Cow::Borrowed("sender")),
+ pattern: None,
+ pattern_type: Some(Cow::Borrowed("user_id")),
+ rel_type: Cow::Borrowed("m.in_reply_to"),
+ include_fallbacks: None,
+ },
+ ))]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
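When serialised to the push-rules API, the condition above corresponds to roughly the following shape (a hedged reconstruction from the struct fields; the `kind` string is assumed from MSC3664's naming):

```python
# Field names mirror RelatedEventMatchCondition above; a pattern_type of
# "user_id" means the pattern is the notifying user's own Matrix ID.
reply_condition = {
    "kind": "im.nheko.msc3664.related_event_match",
    "key": "sender",
    "pattern_type": "user_id",
    "rel_type": "m.in_reply_to",
}
```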
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"),
priority_class: 5,
@@ -258,15 +275,152 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
default_enabled: true,
},
PushRule {
- rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3772.thread_reply"),
+ rule_id: Cow::Borrowed(
+ "global/underride/.org.matrix.msc3933.rule.extensible.encrypted_room_one_to_one",
+ ),
priority_class: 1,
- conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelationMatch {
- rel_type: Cow::Borrowed("m.thread"),
- event_type_pattern: None,
- sender: None,
- sender_type: Some(Cow::Borrowed("user_id")),
- })]),
- actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::RoomMemberCount {
+ is: Some(Cow::Borrowed("2")),
+ }),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed(
+ "global/underride/.org.matrix.msc3933.rule.extensible.message.room_one_to_one",
+ ),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::RoomMemberCount {
+ is: Some(Cow::Borrowed("2")),
+ }),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed(
+ "global/underride/.org.matrix.msc3933.rule.extensible.file.room_one_to_one",
+ ),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::RoomMemberCount {
+ is: Some(Cow::Borrowed("2")),
+ }),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed(
+ "global/underride/.org.matrix.msc3933.rule.extensible.image.room_one_to_one",
+ ),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::RoomMemberCount {
+ is: Some(Cow::Borrowed("2")),
+ }),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed(
+ "global/underride/.org.matrix.msc3933.rule.extensible.video.room_one_to_one",
+ ),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::RoomMemberCount {
+ is: Some(Cow::Borrowed("2")),
+ }),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed(
+ "global/underride/.org.matrix.msc3933.rule.extensible.audio.room_one_to_one",
+ ),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")),
+ pattern_type: None,
+ })),
+ Condition::Known(KnownCondition::RoomMemberCount {
+ is: Some(Cow::Borrowed("2")),
+ }),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
@@ -298,6 +452,126 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
default: true,
default_enabled: true,
},
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.encrypted"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("m.encrypted")),
+ pattern_type: None,
+ })),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.message"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("m.message")),
+ pattern_type: None,
+ })),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.file"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("m.file")),
+ pattern_type: None,
+ })),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.image"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("m.image")),
+ pattern_type: None,
+ })),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.video"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("m.video")),
+ pattern_type: None,
+ })),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
+ PushRule {
+ rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.audio"),
+ priority_class: 1,
+ conditions: Cow::Borrowed(&[
+ Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ key: Cow::Borrowed("type"),
+ // MSC3933: Type changed from template rule - see MSC.
+ pattern: Some(Cow::Borrowed("m.audio")),
+ pattern_type: None,
+ })),
+ // MSC3933: Add condition on top of template rule - see MSC.
+ Condition::Known(KnownCondition::RoomVersionSupports {
+ // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
+ feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
+ }),
+ ]),
+ actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
+ default: true,
+ default_enabled: true,
+ },
PushRule {
rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"),
priority_class: 1,
diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs
index efe88ec76e54..c901c0fbcc60 100644
--- a/rust/src/push/evaluator.rs
+++ b/rust/src/push/evaluator.rs
@@ -12,10 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::{
- borrow::Cow,
- collections::{BTreeMap, BTreeSet},
-};
+use std::collections::BTreeMap;
use anyhow::{Context, Error};
use lazy_static::lazy_static;
@@ -26,11 +23,39 @@ use regex::Regex;
use super::{
utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition,
+ RelatedEventMatchCondition,
};
lazy_static! {
/// Used to parse the `is` clause in the room member count condition.
static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");
+
+ /// Used to determine which MSC3931 room version feature flags are actually known to
+ /// the push evaluator.
+ static ref KNOWN_RVER_FLAGS: Vec<String> = vec![
+ RoomVersionFeatures::ExtensibleEvents.as_str().to_string(),
+ ];
+
+ /// The "safe" rule IDs which are not affected by MSC3932's behaviour (room versions which
+ /// declare Extensible Events support ultimately *disable* push rules which do not declare
+ /// *any* MSC3931 room_version_supports condition).
+ static ref SAFE_EXTENSIBLE_EVENTS_RULE_IDS: Vec<String> = vec![
+ "global/override/.m.rule.master".to_string(),
+ "global/override/.m.rule.roomnotif".to_string(),
+ "global/content/.m.rule.contains_user_name".to_string(),
+ ];
+}
+
+enum RoomVersionFeatures {
+ ExtensibleEvents,
+}
+
+impl RoomVersionFeatures {
+ fn as_str(&self) -> &'static str {
+ match self {
+ RoomVersionFeatures::ExtensibleEvents => "org.matrix.msc3932.extensible_events",
+ }
+ }
}
/// Allows running a set of push rules against a particular event.
@@ -49,29 +74,39 @@ pub struct PushRuleEvaluator {
/// The `notifications` section of the current power levels in the room.
 notification_power_levels: BTreeMap<String, i64>,
- /// The relations related to the event as a mapping from relation type to
- /// set of sender/event type 2-tuples.
- relations: BTreeMap<String, BTreeSet<(String, String)>>,
-
- /// Is running "relation" conditions enabled?
- relation_match_enabled: bool,
-
/// The power level of the sender of the event, or None if event is an
/// outlier.
 sender_power_level: Option<i64>,
+
+ /// The related events, indexed by relation type. Flattened in the same manner as
+ /// `flattened_keys`.
+ related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
+
+ /// If MSC3664 (push rules for related events) is enabled.
+ related_event_match_enabled: bool,
+
+ /// If MSC3931 is applicable, the feature flags for the room version.
+ room_version_feature_flags: Vec<String>,
+
+ /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
+ /// flag as MSC1767 (extensible events core).
+ msc3931_enabled: bool,
}
#[pymethods]
impl PushRuleEvaluator {
/// Create a new `PushRuleEvaluator`. See struct docstring for details.
+ #[allow(clippy::too_many_arguments)]
#[new]
pub fn py_new(
 flattened_keys: BTreeMap<String, String>,
room_member_count: u64,
 sender_power_level: Option<i64>,
 notification_power_levels: BTreeMap<String, i64>,
- relations: BTreeMap<String, BTreeSet<(String, String)>>,
- relation_match_enabled: bool,
+ related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
+ related_event_match_enabled: bool,
+ room_version_feature_flags: Vec<String>,
+ msc3931_enabled: bool,
 ) -> Result<Self, Error> {
let body = flattened_keys
.get("content.body")
@@ -83,9 +118,11 @@ impl PushRuleEvaluator {
body,
room_member_count,
notification_power_levels,
- relations,
- relation_match_enabled,
sender_power_level,
+ related_events_flattened,
+ related_event_match_enabled,
+ room_version_feature_flags,
+ msc3931_enabled,
})
}
@@ -108,7 +145,19 @@ impl PushRuleEvaluator {
continue;
}
+ let rule_id = &push_rule.rule_id().to_string();
+ let extev_flag = &RoomVersionFeatures::ExtensibleEvents.as_str().to_string();
+ let supports_extensible_events = self.room_version_feature_flags.contains(extev_flag);
+ let safe_from_rver_condition = SAFE_EXTENSIBLE_EVENTS_RULE_IDS.contains(rule_id);
+ let mut has_rver_condition = false;
+
for condition in push_rule.conditions.iter() {
+ has_rver_condition |= matches!(
+ condition,
+ // per MSC3932, we just need *any* room version condition to match
+ Condition::Known(KnownCondition::RoomVersionSupports { feature: _ }),
+ );
+
match self.match_condition(condition, user_id, display_name) {
Ok(true) => {}
Ok(false) => continue 'outer,
@@ -119,6 +168,13 @@ impl PushRuleEvaluator {
}
}
+ // MSC3932: Disable push rules in extensible event-supporting room versions if they
+ // don't describe *any* MSC3931 room version condition, unless the rule is on the
+ // safe list.
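+ // For example (illustrative): in a room version advertising extensible events,
+ // the stock `.m.rule.message` underride carries no `room_version_supports`
+ // condition and so is skipped here, whereas the
+ // `.org.matrix.msc1767.rule.extensible.message` variant above declares one
+ // and remains eligible to fire.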
+ if !has_rver_condition && !safe_from_rver_condition && supports_extensible_events {
+ continue;
+ }
+
let actions = push_rule
.actions
.iter()
@@ -170,6 +226,9 @@ impl PushRuleEvaluator {
KnownCondition::EventMatch(event_match) => {
self.match_event_match(event_match, user_id)?
}
+ KnownCondition::RelatedEventMatch(event_match) => {
+ self.match_related_event_match(event_match, user_id)?
+ }
KnownCondition::ContainsDisplayName => {
if let Some(dn) = display_name {
if !dn.is_empty() {
@@ -203,95 +262,99 @@ impl PushRuleEvaluator {
false
}
}
- KnownCondition::RelationMatch {
- rel_type,
- event_type_pattern,
- sender,
- sender_type,
- } => {
- self.match_relations(rel_type, sender, sender_type, user_id, event_type_pattern)?
+ KnownCondition::RoomVersionSupports { feature } => {
+ if !self.msc3931_enabled {
+ false
+ } else {
+ let flag = feature.to_string();
+ KNOWN_RVER_FLAGS.contains(&flag)
+ && self.room_version_feature_flags.contains(&flag)
+ }
}
};
Ok(result)
}
- /// Evaluates a relation condition.
- fn match_relations(
+ /// Evaluates a `event_match` condition.
+ fn match_event_match(
&self,
- rel_type: &str,
- sender: &Option<Cow<'static, str>>,
- sender_type: &Option<Cow<'static, str>>,
+ event_match: &EventMatchCondition,
user_id: Option<&str>,
- event_type_pattern: &Option<Cow<'static, str>>,
 ) -> Result<bool, Error> {
- // First check if relation matching is enabled...
- if !self.relation_match_enabled {
- return Ok(false);
- }
-
- // ... and if there are any relations to match against.
- let relations = if let Some(relations) = self.relations.get(rel_type) {
- relations
- } else {
- return Ok(false);
- };
-
- // Extract the sender pattern from the condition
- let sender_pattern = if let Some(sender) = sender {
- Some(sender.as_ref())
- } else if let Some(sender_type) = sender_type {
- if sender_type == "user_id" {
- if let Some(user_id) = user_id {
- Some(user_id)
- } else {
- return Ok(false);
- }
+ let pattern = if let Some(pattern) = &event_match.pattern {
+ pattern
+ } else if let Some(pattern_type) = &event_match.pattern_type {
+ // The `pattern_type` can either be "user_id" or "user_localpart",
+ // either way if we don't have a `user_id` then the condition can't
+ // match.
+ let user_id = if let Some(user_id) = user_id {
+ user_id
} else {
- warn!("Unrecognized sender_type: {sender_type}");
return Ok(false);
+ };
+
+ match &**pattern_type {
+ "user_id" => user_id,
+ "user_localpart" => get_localpart_from_id(user_id)?,
+ _ => return Ok(false),
}
} else {
- None
+ return Ok(false);
};
- let mut sender_compiled_pattern = if let Some(pattern) = sender_pattern {
- Some(get_glob_matcher(pattern, GlobMatchType::Whole)?)
+ let haystack = if let Some(haystack) = self.flattened_keys.get(&*event_match.key) {
+ haystack
} else {
- None
+ return Ok(false);
};
- let mut type_compiled_pattern = if let Some(pattern) = event_type_pattern {
- Some(get_glob_matcher(pattern, GlobMatchType::Whole)?)
+ // For the content.body we match against "words", but for everything
+ // else we match against the entire value.
+ let match_type = if event_match.key == "content.body" {
+ GlobMatchType::Word
} else {
- None
+ GlobMatchType::Whole
};
- for (relation_sender, event_type) in relations {
- if let Some(pattern) = &mut sender_compiled_pattern {
- if !pattern.is_match(relation_sender)? {
- continue;
- }
- }
-
- if let Some(pattern) = &mut type_compiled_pattern {
- if !pattern.is_match(event_type)? {
- continue;
- }
- }
-
- return Ok(true);
- }
-
- Ok(false)
+ let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
+ compiled_pattern.is_match(haystack)
}
- /// Evaluates a `event_match` condition.
- fn match_event_match(
+ /// Evaluates a `related_event_match` condition. (MSC3664)
+ fn match_related_event_match(
&self,
- event_match: &EventMatchCondition,
+ event_match: &RelatedEventMatchCondition,
user_id: Option<&str>,
 ) -> Result<bool, Error> {
+ // First check if related event matching is enabled...
+ if !self.related_event_match_enabled {
+ return Ok(false);
+ }
+
+ // get the related event, fail if there is none.
+ let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) {
+ event
+ } else {
+ return Ok(false);
+ };
+
+ // If we are not matching fallbacks, don't match if our special key indicating that
+ // this is a fallback relation is present.
+ if !event_match.include_fallbacks.unwrap_or(false)
+ && event.contains_key("im.vector.is_falling_back")
+ {
+ return Ok(false);
+ }
+
+ // If we have no key, accept the related event as matching: it exists, and
+ // there are no fields to match against.
+ let key = if let Some(key) = &event_match.key {
+ key
+ } else {
+ return Ok(true);
+ };
+
let pattern = if let Some(pattern) = &event_match.pattern {
pattern
} else if let Some(pattern_type) = &event_match.pattern_type {
@@ -313,7 +376,7 @@ impl PushRuleEvaluator {
return Ok(false);
};
- let haystack = if let Some(haystack) = self.flattened_keys.get(&*event_match.key) {
+ let haystack = if let Some(haystack) = event.get(&**key) {
haystack
} else {
return Ok(false);
@@ -321,7 +384,7 @@ impl PushRuleEvaluator {
// For the content.body we match against "words", but for everything
// else we match against the entire value.
- let match_type = if event_match.key == "content.body" {
+ let match_type = if key == "content.body" {
GlobMatchType::Word
} else {
GlobMatchType::Whole
@@ -366,9 +429,63 @@ fn push_rule_evaluator() {
BTreeMap::new(),
BTreeMap::new(),
true,
+ vec![],
+ true,
)
.unwrap();
let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
assert_eq!(result.len(), 3);
}
+
+#[test]
+fn test_requires_room_version_supports_condition() {
+ use std::borrow::Cow;
+
+ use crate::push::{PushRule, PushRules};
+
+ let mut flattened_keys = BTreeMap::new();
+ flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
+ let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
+ let evaluator = PushRuleEvaluator::py_new(
+ flattened_keys,
+ 10,
+ Some(0),
+ BTreeMap::new(),
+ BTreeMap::new(),
+ false,
+ flags,
+ true,
+ )
+ .unwrap();
+
+ // first test: are the master and contains_user_name rules excluded from the "requires room
+ // version condition" check?
+ let mut result = evaluator.run(
+ &FilteredPushRules::default(),
+ Some("@bob:example.org"),
+ None,
+ );
+ assert_eq!(result.len(), 3);
+
+ // second test: if an appropriate push rule is in play, does it get handled?
+ let custom_rule = PushRule {
+ rule_id: Cow::from("global/underride/.org.example.extensible"),
+ priority_class: 1, // underride
+ conditions: Cow::from(vec![Condition::Known(
+ KnownCondition::RoomVersionSupports {
+ feature: Cow::from(RoomVersionFeatures::ExtensibleEvents.as_str().to_string()),
+ },
+ )]),
+ actions: Cow::from(vec![Action::Notify]),
+ default: false,
+ default_enabled: true,
+ };
+ let rules = PushRules::new(vec![custom_rule]);
+ result = evaluator.run(
+ &FilteredPushRules::py_new(rules, BTreeMap::new(), true, true),
+ None,
+ None,
+ );
+ assert_eq!(result.len(), 1);
+}
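+
+#[test]
+fn test_related_event_match_skips_fallbacks() {
+ // Illustrative sketch (not part of the original change): a
+ // `related_event_match` (MSC3664) condition should not match a fallback
+ // relation unless `include_fallbacks` is set. Field values here are examples.
+ use std::borrow::Cow;
+
+ let mut related_event = BTreeMap::new();
+ related_event.insert("content.body".to_string(), "foo".to_string());
+ related_event.insert("im.vector.is_falling_back".to_string(), "".to_string());
+ let mut related_events_flattened = BTreeMap::new();
+ related_events_flattened.insert("m.in_reply_to".to_string(), related_event);
+
+ let evaluator = PushRuleEvaluator::py_new(
+ BTreeMap::new(),
+ 10,
+ Some(0),
+ BTreeMap::new(),
+ related_events_flattened,
+ true, // related_event_match_enabled
+ vec![],
+ true,
+ )
+ .unwrap();
+
+ let condition = Condition::Known(KnownCondition::RelatedEventMatch(
+ RelatedEventMatchCondition {
+ key: Some(Cow::from("content.body")),
+ pattern: Some(Cow::from("foo")),
+ pattern_type: None,
+ rel_type: Cow::from("m.in_reply_to"),
+ include_fallbacks: None,
+ },
+ ));
+
+ // The related event is flagged as a fallback, so the condition must not match.
+ assert!(!evaluator
+ .match_condition(&condition, None, None)
+ .unwrap());
+}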
diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs
index 208b9c0d738e..2e9d3e38a17b 100644
--- a/rust/src/push/mod.rs
+++ b/rust/src/push/mod.rs
@@ -267,6 +267,8 @@ pub enum Condition {
#[serde(tag = "kind")]
pub enum KnownCondition {
EventMatch(EventMatchCondition),
+ #[serde(rename = "im.nheko.msc3664.related_event_match")]
+ RelatedEventMatch(RelatedEventMatchCondition),
ContainsDisplayName,
RoomMemberCount {
#[serde(skip_serializing_if = "Option::is_none")]
@@ -275,15 +277,9 @@ pub enum KnownCondition {
SenderNotificationPermission {
key: Cow<'static, str>,
},
- #[serde(rename = "org.matrix.msc3772.relation_match")]
- RelationMatch {
- rel_type: Cow<'static, str>,
- #[serde(skip_serializing_if = "Option::is_none", rename = "type")]
- event_type_pattern: Option<Cow<'static, str>>,
- #[serde(skip_serializing_if = "Option::is_none")]
- sender: Option<Cow<'static, str>>,
- #[serde(skip_serializing_if = "Option::is_none")]
- sender_type: Option<Cow<'static, str>>,
+ #[serde(rename = "org.matrix.msc3931.room_version_supports")]
+ RoomVersionSupports {
+ feature: Cow<'static, str>,
},
}
@@ -309,6 +305,20 @@ pub struct EventMatchCondition {
 pub pattern_type: Option<Cow<'static, str>>,
}
+/// The body of a [`Condition::RelatedEventMatch`]
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct RelatedEventMatchCondition {
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub key: Option<Cow<'static, str>>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub pattern: Option<Cow<'static, str>>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub pattern_type: Option<Cow<'static, str>>,
+ pub rel_type: Cow<'static, str>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub include_fallbacks: Option<bool>,
+}
+
/// The collection of push rules for a user.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
@@ -401,7 +411,8 @@ impl PushRules {
pub struct FilteredPushRules {
push_rules: PushRules,
 enabled_map: BTreeMap<String, bool>,
- msc3772_enabled: bool,
+ msc3664_enabled: bool,
+ msc1767_enabled: bool,
}
#[pymethods]
@@ -410,12 +421,14 @@ impl FilteredPushRules {
pub fn py_new(
push_rules: PushRules,
 enabled_map: BTreeMap<String, bool>,
- msc3772_enabled: bool,
+ msc3664_enabled: bool,
+ msc1767_enabled: bool,
) -> Self {
Self {
push_rules,
enabled_map,
- msc3772_enabled,
+ msc3664_enabled,
+ msc1767_enabled,
}
}
@@ -434,12 +447,16 @@ impl FilteredPushRules {
.iter()
.filter(|rule| {
// Ignore disabled experimental push rules
- if !self.msc3772_enabled
- && rule.rule_id == "global/underride/.org.matrix.msc3772.thread_reply"
+ if !self.msc3664_enabled
+ && rule.rule_id == "global/override/.im.nheko.msc3664.reply"
{
return false;
}
+ if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
+ return false;
+ }
+
true
})
.map(|r| {
@@ -474,6 +491,29 @@ fn test_deserialize_condition() {
let _: Condition = serde_json::from_str(json).unwrap();
}
+#[test]
+fn test_deserialize_unstable_msc3664_condition() {
+ let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern":"coffee","rel_type":"m.in_reply_to"}"#;
+
+ let condition: Condition = serde_json::from_str(json).unwrap();
+ assert!(matches!(
+ condition,
+ Condition::Known(KnownCondition::RelatedEventMatch(_))
+ ));
+}
+
+#[test]
+fn test_deserialize_unstable_msc3931_condition() {
+ let json =
+ r#"{"kind":"org.matrix.msc3931.room_version_supports","feature":"org.example.feature"}"#;
+
+ let condition: Condition = serde_json::from_str(json).unwrap();
+ assert!(matches!(
+ condition,
+ Condition::Known(KnownCondition::RoomVersionSupports { feature: _ })
+ ));
+}
+
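+#[test]
+fn test_serialize_unstable_msc3931_condition() {
+ // Illustrative sketch (not part of the original change): the unstable `kind`
+ // should survive a serde round-trip.
+ let condition = Condition::Known(KnownCondition::RoomVersionSupports {
+ feature: Cow::from("org.example.feature"),
+ });
+
+ let json = serde_json::to_string(&condition).unwrap();
+ assert_eq!(
+ json,
+ r#"{"kind":"org.matrix.msc3931.room_version_supports","feature":"org.example.feature"}"#
+ );
+}
+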
#[test]
fn test_deserialize_custom_condition() {
let json = r#"{"kind":"custom_tag"}"#;
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index cd2e64b75f9d..744230019610 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -27,6 +27,7 @@
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
+ "ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
)
DESC = """\
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index eab23f18f1e2..8741ba3e34b2 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -53,6 +53,12 @@ Run the complement test suite on Synapse.
Only build the Docker images. Don't actually run Complement.
Conflicts with -f/--fast.
+ -e, --editable
+ Use an editable build of Synapse, rebuilding the image if necessary.
+ This is suitable for use in development where a fast turn-around time
+ is important.
+ Not suitable for use in CI, since the editable environment may be impure.
+
For help on arguments to 'go test', run 'go help testflag'.
EOF
}
@@ -73,6 +79,9 @@ while [ $# -ge 1 ]; do
"--build-only")
skip_complement_run=1
;;
+ "-e"|"--editable")
+ use_editable_synapse=1
+ ;;
*)
# unknown arg: presumably an argument to gotest. break the loop.
break
@@ -96,25 +105,76 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
fi
+if [ -n "$use_editable_synapse" ]; then
+ if [[ -e synapse/synapse_rust.abi3.so ]]; then
+ # In an editable install, back up the host's compiled Rust module, since the
+ # container will overwrite the module with its own copy.
+ mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host
+ # And restore it on exit:
+ synapse_pkg=$(realpath synapse)
+ trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT
+ fi
+
+ editable_mount="$(realpath .):/editable-src:z"
+ if docker inspect complement-synapse-editable &>/dev/null; then
+ # complement-synapse-editable already exists: see if we can still use it:
+ # - The Rust module must still be importable; it will fail to import if the Rust source has changed.
+ # - The Poetry lock file must be the same (otherwise we assume dependencies have changed)
+
+ # First set up the module in the right place for an editable installation.
+ docker run --rm -v "$editable_mount" --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
+
+ if (docker run --rm -v "$editable_mount" --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
+ && docker run --rm -v "$editable_mount" --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
+ skip_docker_build=1
+ else
+ echo "Editable Synapse image is stale. Will rebuild."
+ unset skip_docker_build
+ fi
+ fi
+fi
+
if [ -z "$skip_docker_build" ]; then
- # Build the base Synapse image from the local checkout
- echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
- docker build -t matrixdotorg/synapse \
- --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
- --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
- -f "docker/Dockerfile" .
- echo_if_github "::endgroup::"
-
- # Build the workers docker image (from the base Synapse image we just built).
- echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
- docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
- echo_if_github "::endgroup::"
-
- # Build the unified Complement image (from the worker Synapse image we just built).
- echo_if_github "::group::Build Docker image: complement/Dockerfile"
- docker build -t complement-synapse \
- -f "docker/complement/Dockerfile" "docker/complement"
- echo_if_github "::endgroup::"
+ if [ -n "$use_editable_synapse" ]; then
+
+ # Build a special image designed for use in development with editable
+ # installs.
+ docker build -t synapse-editable \
+ -f "docker/editable.Dockerfile" .
+
+ docker build -t synapse-workers-editable \
+ --build-arg FROM=synapse-editable \
+ -f "docker/Dockerfile-workers" .
+
+ docker build -t complement-synapse-editable \
+ --build-arg FROM=synapse-workers-editable \
+ -f "docker/complement/Dockerfile" "docker/complement"
+
+ # Prepare the Rust module
+ docker run --rm -v "$editable_mount" --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
+
+ else
+
+ # Build the base Synapse image from the local checkout
+ echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
+ docker build -t matrixdotorg/synapse \
+ --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
+ --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
+ -f "docker/Dockerfile" .
+ echo_if_github "::endgroup::"
+
+ # Build the workers docker image (from the base Synapse image we just built).
+ echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
+ docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
+ echo_if_github "::endgroup::"
+
+ # Build the unified Complement image (from the worker Synapse image we just built).
+ echo_if_github "::group::Build Docker image: complement/Dockerfile"
+ docker build -t complement-synapse \
+ -f "docker/complement/Dockerfile" "docker/complement"
+ echo_if_github "::endgroup::"
+
+ fi
fi
if [ -n "$skip_complement_run" ]; then
@@ -123,10 +183,14 @@ if [ -n "$skip_complement_run" ]; then
fi
export COMPLEMENT_BASE_IMAGE=complement-synapse
+if [ -n "$use_editable_synapse" ]; then
+ export COMPLEMENT_BASE_IMAGE=complement-synapse-editable
+ export COMPLEMENT_HOST_MOUNTS="$editable_mount"
+fi
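+# (With the editable image, the local checkout is bind-mounted into the test
+# containers via COMPLEMENT_HOST_MOUNTS, so edits to the Python source take
+# effect without rebuilding the image.)
+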
extra_test_args=()
-test_tags="synapse_blacklist,msc2716,msc3030,msc3787"
+test_tags="synapse_blacklist,msc3787,msc3874"
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -139,6 +203,9 @@ if [[ -n "$WORKERS" ]]; then
# Use workers.
export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true
+ # Pass through the workers defined. If none, it will be an empty string.
+ export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES"
+
# Workers can only use Postgres as a database.
export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
@@ -158,7 +225,10 @@ else
# We only test faster room joins on monoliths, because they are purposefully
# being developed without worker support to start with.
- test_tags="$test_tags,faster_joins"
+ #
+ # The tests for importing historical messages (MSC2716) also only pass with monoliths,
+ # currently.
+ test_tags="$test_tags,faster_joins,msc2716"
fi
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
index 763dd02c477e..b1d5e2e61667 100755
--- a/scripts-dev/federation_client.py
+++ b/scripts-dev/federation_client.py
@@ -46,11 +46,12 @@
import signedjson.types
import srvlookup
import yaml
+from requests import PreparedRequest, Response
from requests.adapters import HTTPAdapter
from urllib3 import HTTPConnectionPool
# uncomment the following to enable debug logging of http requests
-# from httplib import HTTPConnection
+# from http.client import HTTPConnection
# HTTPConnection.debuglevel = 1
@@ -103,6 +104,7 @@ def request(
destination: str,
path: str,
content: Optional[str],
+ verify_tls: bool,
) -> requests.Response:
if method is None:
if content is None:
@@ -141,7 +143,6 @@ def request(
s.mount("matrix://", MatrixConnectionAdapter())
headers: Dict[str, str] = {
- "Host": destination,
"Authorization": authorization_headers[0],
}
@@ -152,7 +153,7 @@ def request(
method=method,
url=dest,
headers=headers,
- verify=False,
+ verify=verify_tls,
data=content,
stream=True,
)
@@ -202,6 +203,12 @@ def main() -> None:
parser.add_argument("--body", help="Data to send as the body of the HTTP request")
+ parser.add_argument(
+ "--insecure",
+ action="store_true",
+ help="Disable TLS certificate verification",
+ )
+
parser.add_argument(
"path", help="request path, including the '/_matrix/federation/...' prefix."
)
@@ -227,6 +234,7 @@ def main() -> None:
args.destination,
args.path,
content=args.body,
+ verify_tls=not args.insecure,
)
sys.stderr.write("Status Code: %d\n" % (result.status_code,))
@@ -254,36 +262,93 @@ def read_args_from_config(args: argparse.Namespace) -> None:
class MatrixConnectionAdapter(HTTPAdapter):
+ def send(
+ self,
+ request: PreparedRequest,
+ *args: Any,
+ **kwargs: Any,
+ ) -> Response:
+ # overrides the send() method in the base class.
+
+ # We need to look for .well-known redirects before passing the request up to
+ # HTTPAdapter.send().
+ assert isinstance(request.url, str)
+ parsed = urlparse.urlsplit(request.url)
+ server_name = parsed.netloc
+ well_known = self._get_well_known(parsed.netloc)
+
+ if well_known:
+ server_name = well_known
+
+ # replace the scheme in the uri with https, so that cert verification is done
+ # also replace the hostname if we got a .well-known result
+ request.url = urlparse.urlunsplit(
+ ("https", server_name, parsed.path, parsed.query, parsed.fragment)
+ )
+
+ # at this point we also add the host header (otherwise urllib will add one
+ # based on the `host` from the connection returned by `get_connection`,
+ # which will be wrong if there is an SRV record).
+ request.headers["Host"] = server_name
+
+ return super().send(request, *args, **kwargs)
+
+ def get_connection(
+ self, url: str, proxies: Optional[Dict[str, str]] = None
+ ) -> HTTPConnectionPool:
+ # overrides the get_connection() method in the base class
+ parsed = urlparse.urlsplit(url)
+ (host, port, ssl_server_name) = self._lookup(parsed.netloc)
+ print(
+ f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
+ )
+ return self.poolmanager.connection_from_host(
+ host,
+ port=port,
+ scheme="https",
+ pool_kwargs={"server_hostname": ssl_server_name},
+ )
+
@staticmethod
- def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]:
- if s[-1] == "]":
+ def _lookup(server_name: str) -> Tuple[str, int, str]:
+ """
+ Do an SRV lookup on a server name and return the host:port to connect to
+ Given the server_name (after any .well-known lookup), return the host, port and
+ the ssl server name
+ """
+ if server_name[-1] == "]":
# ipv6 literal (with no port)
- return s, 8448
+ return server_name, 8448, server_name
- if ":" in s:
- out = s.rsplit(":", 1)
+ if ":" in server_name:
+ # explicit port
+ out = server_name.rsplit(":", 1)
try:
port = int(out[1])
except ValueError:
- raise ValueError("Invalid host:port '%s'" % s)
- return out[0], port
-
- # try a .well-known lookup
- if not skip_well_known:
- well_known = MatrixConnectionAdapter.get_well_known(s)
- if well_known:
- return MatrixConnectionAdapter.lookup(well_known, skip_well_known=True)
+ raise ValueError("Invalid host:port '%s'" % (server_name,))
+ return out[0], port, out[0]
try:
- srv = srvlookup.lookup("matrix", "tcp", s)[0]
- return srv.host, srv.port
+ srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
+ print(
+ f"SRV lookup on _matrix._tcp.{server_name} gave {srv}",
+ file=sys.stderr,
+ )
+ return srv.host, srv.port, server_name
except Exception:
- return s, 8448
+ return server_name, 8448, server_name
@staticmethod
- def get_well_known(server_name: str) -> Optional[str]:
- uri = "https://%s/.well-known/matrix/server" % (server_name,)
- print("fetching %s" % (uri,), file=sys.stderr)
+ def _get_well_known(server_name: str) -> Optional[str]:
+ if ":" in server_name:
+ # explicit port, or ipv6 literal. Either way, no .well-known
+ return None
+
+ # TODO: check for ipv4 literals
+
+ uri = f"https://{server_name}/.well-known/matrix/server"
+ print(f"fetching {uri}", file=sys.stderr)
try:
resp = requests.get(uri)
@@ -304,19 +369,6 @@ def get_well_known(server_name: str) -> Optional[str]:
print("Invalid response from %s: %s" % (uri, e), file=sys.stderr)
return None
- def get_connection(
- self, url: str, proxies: Optional[Dict[str, str]] = None
- ) -> HTTPConnectionPool:
- parsed = urlparse.urlparse(url)
-
- (host, port) = self.lookup(parsed.netloc)
- netloc = "%s:%d" % (host, port)
- print("Connecting to %s" % (netloc,), file=sys.stderr)
- url = urlparse.urlunparse(
- ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
- )
- return super().get_connection(url, proxies)
-
if __name__ == "__main__":
main()
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index c82c58c54b05..6974fd789575 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -27,7 +27,7 @@
import urllib.request
from os import path
from tempfile import TemporaryDirectory
-from typing import Any, List, Optional, cast
+from typing import Any, List, Optional
import attr
import click
@@ -174,9 +174,7 @@ def _prepare() -> None:
click.get_current_context().abort()
# Switch to the release branch.
- # Cast safety: parse() won't return a version.LegacyVersion from our
- # version string format.
- parsed_new_version = cast(version.Version, version.parse(new_version))
+ parsed_new_version = version.parse(new_version)
# We assume for debian changelogs that we only do RCs or full releases.
assert not parsed_new_version.is_devrelease
@@ -219,9 +217,7 @@ def _prepare() -> None:
update_branch(repo)
# Create the new release branch
- # Type ignore will no longer be needed after GitPython 3.1.28.
- # See https://github.com/gitpython-developers/GitPython/pull/1419
- repo.create_head(release_branch_name, commit=base_branch) # type: ignore[arg-type]
+ repo.create_head(release_branch_name, commit=base_branch)
# Special-case SyTest: we don't actually prepare any files so we may
# as well push it now (and only when we create a release branch;
diff --git a/synapse/replication/slave/storage/keys.py b/stubs/icu.pyi
similarity index 62%
rename from synapse/replication/slave/storage/keys.py
rename to stubs/icu.pyi
index a00b38c512a0..efeda7938a73 100644
--- a/synapse/replication/slave/storage/keys.py
+++ b/stubs/icu.pyi
@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,9 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.storage.databases.main.keys import KeyStore
+# Stub for PyICU.
-# KeyStore isn't really safe to use from a worker, but for now we do so and hope that
-# the races it creates aren't too bad.
+class Locale:
+ @staticmethod
+ def getDefault() -> Locale: ...
-SlavedKeyStore = KeyStore
+class BreakIterator:
+ @staticmethod
+ def createWordInstance(locale: Locale) -> BreakIterator: ...
+ def setText(self, text: str) -> None: ...
+ def nextBoundary(self) -> int: ...
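+
+# Usage sketch (illustrative only, not part of the stub): segmenting a string
+# into words. Per PyICU, `nextBoundary()` returns a negative value once the
+# text is exhausted.
+#
+# breaker = BreakIterator.createWordInstance(Locale.getDefault())
+# breaker.setText("hello world")
+# start = 0
+# while (end := breaker.nextBoundary()) >= 0:
+#     print("hello world"[start:end])
+#     start = end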
diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi
index 5900e61450b1..dab5d4aff7ce 100644
--- a/stubs/synapse/synapse_rust/push.pyi
+++ b/stubs/synapse/synapse_rust/push.pyi
@@ -1,4 +1,4 @@
-from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union
+from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union
from synapse.types import JsonDict
@@ -26,7 +26,11 @@ class PushRules:
class FilteredPushRules:
def __init__(
- self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3772_enabled: bool
+ self,
+ push_rules: PushRules,
+ enabled_map: Dict[str, bool],
+ msc3664_enabled: bool,
+ msc1767_enabled: bool,
): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
@@ -39,12 +43,14 @@ class PushRuleEvaluator:
room_member_count: int,
sender_power_level: Optional[int],
notification_power_levels: Mapping[str, int],
- relations: Mapping[str, Set[Tuple[str, str]]],
- relation_match_enabled: bool,
+ related_events_flattened: Mapping[str, Mapping[str, str]],
+ related_event_match_enabled: bool,
+ room_version_feature_flags: Tuple[str, ...],
+ msc3931_enabled: bool,
): ...
def run(
self,
push_rules: FilteredPushRules,
user_id: Optional[str],
display_name: Optional[str],
- ) -> Collection[dict]: ...
+ ) -> Collection[Union[Mapping, str]]: ...
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 7bb07cfd0516..a6623e295663 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -21,6 +21,7 @@
import sys
from synapse.util.rust import check_rust_lib_up_to_date
+from synapse.util.stringutils import strtobool
# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7):
@@ -28,31 +29,29 @@
sys.exit(1)
# Allow using the asyncio reactor via env var.
-if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)):
- try:
- from incremental import Version
+if strtobool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", "0")):
+ from incremental import Version
- import twisted
+ import twisted
- # We need a bugfix that is included in Twisted 21.2.0:
- # https://twistedmatrix.com/trac/ticket/9787
- if twisted.version < Version("Twisted", 21, 2, 0):
- print("Using asyncio reactor requires Twisted>=21.2.0")
- sys.exit(1)
+ # We need a bugfix that is included in Twisted 21.2.0:
+ # https://twistedmatrix.com/trac/ticket/9787
+ if twisted.version < Version("Twisted", 21, 2, 0):
+ print("Using asyncio reactor requires Twisted>=21.2.0")
+ sys.exit(1)
- import asyncio
+ import asyncio
- from twisted.internet import asyncioreactor
+ from twisted.internet import asyncioreactor
- if bool(os.environ.get("SYNAPSE_UVLOOP", False)):
- import uvloop
+ if bool(os.environ.get("SYNAPSE_UVLOOP", False)):
+ import uvloop
- uvloop.install()
- print("Using uvloop")
+ uvloop.install()
+ print("Using uvloop")
+
+ asyncioreactor.install(asyncio.get_event_loop())
- asyncioreactor.install(asyncio.get_event_loop())
- except ImportError:
- pass
# Twisted and canonicaljson will fail to import when this file is executed to
# get the __version__ during a fresh install. That's OK and subsequent calls to
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
index 0c4504d5d8c5..2b74a4016687 100644
--- a/synapse/_scripts/register_new_matrix_user.py
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -222,6 +222,7 @@ def main() -> None:
args = parser.parse_args()
+ config: Optional[Dict[str, Any]] = None
if "config" in args and args.config:
config = yaml.safe_load(args.config)
@@ -229,7 +230,7 @@ def main() -> None:
secret = args.shared_secret
else:
# argparse should check that we have either config or shared secret
- assert config
+ assert config is not None
secret = config.get("registration_shared_secret")
secret_file = config.get("registration_shared_secret_path")
@@ -244,7 +245,7 @@ def main() -> None:
if args.server_url:
server_url = args.server_url
- elif config:
+ elif config is not None:
server_url = _find_client_listener(config)
if not server_url:
server_url = _DEFAULT_SERVER_URL
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 5fa599e70e90..d850e54e1751 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -72,6 +72,7 @@
RegistrationBackgroundUpdateStore,
find_max_generated_user_id_localpart,
)
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
@@ -206,6 +207,7 @@ class Store(
PusherWorkerStore,
PresenceBackgroundUpdateStore,
ReceiptsBackgroundUpdateStore,
+ RelationsWorkerStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py
old mode 100755
new mode 100644
index fb1fb83f50d9..0adf94bba68f
--- a/synapse/_scripts/update_synapse_database.py
+++ b/synapse/_scripts/update_synapse_database.py
@@ -15,7 +15,6 @@
import argparse
import logging
-import sys
from typing import cast
import yaml
@@ -100,13 +99,6 @@ def main() -> None:
# Load, process and sanity-check the config.
hs_config = yaml.safe_load(args.database_config)
- if "database" not in hs_config and "databases" not in hs_config:
- sys.stderr.write(
- "The configuration file must have a 'database' or 'databases' section. "
- "See https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database"
- )
- sys.exit(4)
-
config = HomeServerConfig()
config.parse_config_dict(hs_config, "", "")
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 44c5ffc6a590..6a5e7171da90 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -125,6 +125,8 @@ class EventTypes:
MSC2716_BATCH: Final = "org.matrix.msc2716.batch"
MSC2716_MARKER: Final = "org.matrix.msc2716.marker"
+ Reaction: Final = "m.reaction"
+
class ToDeviceEventTypes:
RoomKeyRequest: Final = "m.room_key_request"
@@ -150,6 +152,7 @@ class EduTypes:
class RejectedReason:
AUTH_ERROR: Final = "auth_error"
+ OVERSIZED_EVENT: Final = "oversized_event"
class RoomCreationPreset:
@@ -228,6 +231,9 @@ class EventContentFields:
# The authorising user for joining a restricted room.
AUTHORISING_USER: Final = "join_authorised_via_users_server"
+ # An unspecced field added to to-device messages to identify them uniquely-ish.
+ TO_DEVICE_MSGID: Final = "org.matrix.msgid"
+
class RoomTypes:
"""Understood values of the room_type field of m.room.create events."""
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index c6062075690e..c2c177fd71d0 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -155,7 +155,13 @@ def __init__(self, location: bytes, http_code: int = http.FOUND):
class SynapseError(CodeMessageException):
"""A base exception type for matrix errors which have an errcode and error
- message (as well as an HTTP status code).
+ message (as well as an HTTP status code). These often bubble all the way up to the
+ client API response, so the error code and status reach the client as defined
+ here. If the error doesn't make sense to present to a client, then it
+ probably shouldn't be a `SynapseError`. For example, if we contact another
+ homeserver over federation, we shouldn't automatically ferry response errors back to
+ the client on our end (a 500 from a remote server does not make sense to a client
+ when our server did not experience a 500).
Attributes:
errcode: Matrix error code e.g 'M_FORBIDDEN'
@@ -294,10 +300,8 @@ def __init__(self, session_id: str, result: "JsonDict"):
class UnrecognizedRequestError(SynapseError):
"""An error indicating we don't understand the request you're trying to make"""
- def __init__(
- self, msg: str = "Unrecognized request", errcode: str = Codes.UNRECOGNIZED
- ):
- super().__init__(400, msg, errcode)
+ def __init__(self, msg: str = "Unrecognized request", code: int = 400):
+ super().__init__(code, msg, Codes.UNRECOGNIZED)
class NotFoundError(SynapseError):
@@ -420,8 +424,17 @@ def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
class EventSizeError(SynapseError):
"""An error raised when an event is too big."""
- def __init__(self, msg: str):
+ def __init__(self, msg: str, unpersistable: bool):
+ """
+ unpersistable:
+ if True, the PDU must not be persisted, not even as a rejected PDU
+ when received over federation.
+ This is notably true when the entire PDU exceeds the size limit for a PDU
+ (as opposed to an individual key's size limit being exceeded).
+ """
+
super().__init__(413, msg, Codes.TOO_LARGE)
+ self.unpersistable = unpersistable
class LoginError(SynapseError):
@@ -600,8 +613,20 @@ def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
class FederationError(RuntimeError):
- """This class is used to inform remote homeservers about erroneous
- PDUs they sent us.
+ """
+ Raised when we process an erroneous PDU.
+
+ There are two kinds of scenarios where this exception can be raised:
+
+ 1. We may pull an invalid PDU from a remote homeserver (e.g. during backfill). We
+ raise this exception to signal an error to the rest of the application.
+ 2. We may be pushed an invalid PDU as part of a `/send` transaction from a remote
+ homeserver. We raise so that we can respond to the transaction and include the
+ error string in the "PDU Processing Result". The message which will likely be
+ ignored by the remote homeserver and is not machine parse-able since it's just a
+ string.
+
+ TODO: In the future, we should split these usage scenarios into their own error types.
FATAL: The remote server could not interpret the source event.
(e.g., it was missing a required field)
@@ -640,6 +665,27 @@ def get_dict(self) -> "JsonDict":
}
+class FederationPullAttemptBackoffError(RuntimeError):
+ """
+ Raised to indicate that we are deliberately not attempting to pull the given
+ event over federation because we've already done so recently and are backing off.
+
+ Attributes:
+ event_ids: The event IDs which we are refusing to pull
+ message: A custom error message that gives more context
+ """
+
+ def __init__(self, event_ids: List[str], message: Optional[str]):
+ self.event_ids = event_ids
+
+ if message:
+ error_message = message
+ else:
+ error_message = f"Not attempting to pull event_ids={self.event_ids} because we already tried to pull them recently (backing off)."
+
+ super().__init__(error_message)
+
+
class HttpResponseException(CodeMessageException):
"""
Represents an HTTP-level failure of an outbound request
@@ -674,7 +720,7 @@ def to_synapse_error(self) -> SynapseError:
set to the reason code from the HTTP response.
Returns:
- SynapseError:
+ The error converted to a SynapseError.
"""
# try to parse the body as json, to get better errcode/msg, but
# default to M_UNKNOWN with the HTTP status as the error text
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index cc31cf8cc74d..a9888381b451 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -36,14 +36,14 @@
from synapse.api.constants import EduTypes, EventContentFields
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
-from synapse.events import EventBase
+from synapse.events import EventBase, relation_from_event
from synapse.types import JsonDict, RoomID, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"limit": {"type": "number"},
@@ -53,11 +53,17 @@
# check types are valid event types
"types": {"type": "array", "items": {"type": "string"}},
"not_types": {"type": "array", "items": {"type": "string"}},
+ # MSC3874, filtering /messages.
+ "org.matrix.msc3874.rel_types": {"type": "array", "items": {"type": "string"}},
+ "org.matrix.msc3874.not_rel_types": {
+ "type": "array",
+ "items": {"type": "string"},
+ },
},
}
ROOM_FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"not_rooms": {"$ref": "#/definitions/room_id_array"},
@@ -71,7 +77,7 @@
}
ROOM_EVENT_FILTER_SCHEMA = {
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"limit": {"type": "number"},
@@ -137,7 +143,7 @@
},
},
},
- "additionalProperties": False,
+ "additionalProperties": True, # Allow new fields for forward compatibility
}
@@ -334,8 +340,15 @@ def __init__(self, hs: "HomeServer", filter_json: JsonDict):
self.labels = filter_json.get("org.matrix.labels", None)
self.not_labels = filter_json.get("org.matrix.not_labels", [])
- self.related_by_senders = self.filter_json.get("related_by_senders", None)
- self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)
+ self.related_by_senders = filter_json.get("related_by_senders", None)
+ self.related_by_rel_types = filter_json.get("related_by_rel_types", None)
+
+ # For compatibility with _check_fields.
+ self.rel_types = None
+ self.not_rel_types = []
+ if hs.config.experimental.msc3874_enabled:
+ self.rel_types = filter_json.get("org.matrix.msc3874.rel_types", None)
+ self.not_rel_types = filter_json.get("org.matrix.msc3874.not_rel_types", [])
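+
+ # Illustrative example (not from the original change): a filter using these
+ # unstable fields might include
+ # {"org.matrix.msc3874.rel_types": ["m.thread"],
+ # "org.matrix.msc3874.not_rel_types": ["m.replace"]}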
def filters_all_types(self) -> bool:
return "*" in self.not_types
@@ -386,11 +399,19 @@ def _check(self, event: FilterEvent) -> bool:
# check if there is a string url field in the content for filtering purposes
labels = content.get(EventContentFields.LABELS, [])
+ # Check if the event has a relation.
+ rel_type = None
+ if isinstance(event, EventBase):
+ relation = relation_from_event(event)
+ if relation:
+ rel_type = relation.rel_type
+
field_matchers = {
"rooms": lambda v: room_id == v,
"senders": lambda v: sender == v,
"types": lambda v: _matches_wildcard(ev_type, v),
"labels": lambda v: v in labels,
+ "rel_types": lambda v: rel_type == v,
}
result = self._check_fields(field_matchers)
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 044c7d492666..511790c7c5e4 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -343,6 +343,7 @@ async def ratelimit(
requester: Requester,
update: bool = True,
is_admin_redaction: bool = False,
+ n_actions: int = 1,
) -> None:
"""Ratelimits requests.
@@ -355,6 +356,8 @@ async def ratelimit(
is_admin_redaction: Whether this is a room admin/moderator
redacting an event. If so then we may apply different
ratelimits depending on config.
+ n_actions: Multiplier for the number of actions to apply to the
+ rate limiter at once.
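+ For example (sketch): a handler persisting a batch of 10 events in one
+ request might charge the limiter once with `n_actions=10`.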
Raises:
LimitExceededError if the request should be ratelimited
@@ -383,7 +386,9 @@ async def ratelimit(
if is_admin_redaction and self.admin_redaction_ratelimiter:
# If we have separate config for admin redactions, use a separate
# ratelimiter as to not have user_ids clash
- await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
+ await self.admin_redaction_ratelimiter.ratelimit(
+ requester, update=update, n_actions=n_actions
+ )
else:
# Override rate and burst count per-user
await self.request_ratelimiter.ratelimit(
@@ -391,4 +396,5 @@ async def ratelimit(
rate_hz=messages_per_second,
burst_count=burst_count,
update=update,
+ n_actions=n_actions,
)
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index e37acb0f1edf..c397920fe54e 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Callable, Dict, Optional
+from typing import Callable, Dict, Optional, Tuple
import attr
@@ -51,6 +51,13 @@ class RoomDisposition:
UNSTABLE = "unstable"
+class PushRuleRoomFlag:
+ """Enum for listing possible MSC3931 room version feature flags, for push rules"""
+
+ # MSC3932: Room version supports MSC1767 Extensible Events.
+ EXTENSIBLE_EVENTS = "org.matrix.msc3932.extensible_events"
+
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomVersion:
"""An object which describes the unique attributes of a room version."""
@@ -91,6 +98,12 @@ class RoomVersion:
msc3787_knock_restricted_join_rule: bool
# MSC3667: Enforce integer power levels
msc3667_int_only_power_levels: bool
+ # MSC3931: Adds a push rule condition for "room version feature flags", making
+ # some push rules room version dependent. Note that adding a flag to this list
+ # is not enough to mark it "supported": the push rule evaluator also needs to
+ # support the flag. Unknown flags are ignored by the evaluator, so any
+ # condition that references one will fail to match.
+ msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag
class RoomVersions:
@@ -111,6 +124,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
V2 = RoomVersion(
"2",
@@ -129,6 +143,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
V3 = RoomVersion(
"3",
@@ -147,6 +162,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
V4 = RoomVersion(
"4",
@@ -165,6 +181,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
V5 = RoomVersion(
"5",
@@ -183,6 +200,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
V6 = RoomVersion(
"6",
@@ -201,6 +219,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
MSC2176 = RoomVersion(
"org.matrix.msc2176",
@@ -219,6 +238,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
V7 = RoomVersion(
"7",
@@ -237,6 +257,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
V8 = RoomVersion(
"8",
@@ -255,6 +276,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
V9 = RoomVersion(
"9",
@@ -273,6 +295,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
MSC3787 = RoomVersion(
"org.matrix.msc3787",
@@ -291,6 +314,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
)
V10 = RoomVersion(
"10",
@@ -309,6 +333,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
+ msc3931_push_features=(),
)
MSC2716v4 = RoomVersion(
"org.matrix.msc2716v4",
@@ -327,6 +352,27 @@ class RoomVersions:
msc2716_redactions=True,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
+ msc3931_push_features=(),
+ )
+ MSC1767v10 = RoomVersion(
+ # MSC1767 (Extensible Events) based on room version "10"
+ "org.matrix.msc1767.10",
+ RoomDisposition.UNSTABLE,
+ EventFormatVersions.ROOM_V4_PLUS,
+ StateResolutionVersions.V2,
+ enforce_key_validity=True,
+ special_case_aliases_auth=False,
+ strict_canonicaljson=True,
+ limit_notifications_power_levels=True,
+ msc2176_redaction_rules=False,
+ msc3083_join_rules=True,
+ msc3375_redaction_rules=True,
+ msc2403_knocking=True,
+ msc2716_historical=False,
+ msc2716_redactions=False,
+ msc3787_knock_restricted_join_rule=True,
+ msc3667_int_only_power_levels=True,
+ msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
)
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index bd49fa6a5f03..a918579f5051 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -28,7 +28,7 @@
FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
STATIC_PREFIX = "/_matrix/static"
-SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
+SERVER_KEY_PREFIX = "/_matrix/key"
MEDIA_R0_PREFIX = "/_matrix/media/r0"
MEDIA_V3_PREFIX = "/_matrix/media/v3"
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 000912e86ee9..a5aa2185a28e 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -47,6 +47,7 @@
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool
+from twisted.web.resource import Resource
import synapse.util.caches
from synapse.api.constants import MAX_PDU_SIZE
@@ -55,12 +56,13 @@
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
-from synapse.config.server import ManholeConfig
+from synapse.config.server import ListenerConfig, ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
+from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
@@ -264,26 +266,18 @@ async def wrapper() -> None:
reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))
-def listen_metrics(
- bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool
-) -> None:
+def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
"""
Start Prometheus metrics server.
"""
from prometheus_client import start_http_server as start_http_server_prometheus
- from synapse.metrics import (
- RegistryProxy,
- start_http_server as start_http_server_legacy,
- )
+ from synapse.metrics import RegistryProxy
for host in bind_addresses:
logger.info("Starting metrics listener on %s:%d", host, port)
- if enable_legacy_metric_names:
- start_http_server_legacy(port, addr=host, registry=RegistryProxy)
- else:
- _set_prometheus_client_use_created_metrics(False)
- start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
+ _set_prometheus_client_use_created_metrics(False)
+ start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:
@@ -357,6 +351,55 @@ def listen_tcp(
return r # type: ignore[return-value]
+def listen_http(
+ listener_config: ListenerConfig,
+ root_resource: Resource,
+ version_string: str,
+ max_request_body_size: int,
+ context_factory: Optional[IOpenSSLContextFactory],
+ reactor: ISynapseReactor = reactor,
+) -> List[Port]:
+ port = listener_config.port
+ bind_addresses = listener_config.bind_addresses
+ tls = listener_config.tls
+
+ assert listener_config.http_options is not None
+
+ site_tag = listener_config.http_options.tag
+ if site_tag is None:
+ site_tag = str(port)
+
+ site = SynapseSite(
+ "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
+ site_tag,
+ listener_config,
+ root_resource,
+ version_string,
+ max_request_body_size=max_request_body_size,
+ reactor=reactor,
+ )
+ if tls:
+ # refresh_certificate should have been called before this.
+ assert context_factory is not None
+ ports = listen_ssl(
+ bind_addresses,
+ port,
+ site,
+ context_factory,
+ reactor=reactor,
+ )
+ logger.info("Synapse now listening on TCP port %d (TLS)", port)
+ else:
+ ports = listen_tcp(
+ bind_addresses,
+ port,
+ site,
+ reactor=reactor,
+ )
+ logger.info("Synapse now listening on TCP port %d", port)
+ return ports
+
+
def listen_ssl(
bind_addresses: Collection[str],
port: int,
@@ -558,7 +601,7 @@ def reload_cache_config(config: HomeServerConfig) -> None:
logger.warning(f)
else:
logger.debug(
- "New cache config. Was:\n %s\nNow:\n",
+ "New cache config. Was:\n %s\nNow:\n %s",
previous_cache_config.__dict__,
config.caches.__dict__,
)
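
The new `listen_http` helper folds the SynapseSite construction and the `listen_ssl`/`listen_tcp` dispatch, previously duplicated across apps, into one place. The updated call sites later in this diff all reduce to roughly:

    # Call shape taken from the updated call sites below:
    ports = listen_http(
        listener_config,
        root_resource,
        self.version_string,
        max_request_body_size(self.config),
        self.tls_server_context_factory,  # may be None for plain-HTTP listeners
        reactor=self.get_reactor(),
    )

Passing a None context factory is fine for plain-HTTP listeners, since the helper only asserts it is non-None when the listener has TLS enabled.
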
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 3c8c00ea5bc4..165d1c5db06b 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -28,10 +28,6 @@
from synapse.config.logger import setup_logging
from synapse.events import EventBase
from synapse.handlers.admin import ExfiltrationWriter
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.server import HomeServer
from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
@@ -40,10 +36,24 @@
ApplicationServiceWorkerStore,
)
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
+from synapse.storage.databases.main.devices import DeviceWorkerStore
+from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
+from synapse.storage.databases.main.event_push_actions import (
+ EventPushActionsWorkerStore,
+)
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.filtering import FilteringWorkerStore
+from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
+from synapse.storage.databases.main.signatures import SignatureWorkerStore
+from synapse.storage.databases.main.state import StateGroupWorkerStore
+from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
+from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import StateMap
from synapse.util import SYNAPSE_VERSION
from synapse.util.logcontext import LoggingContext
@@ -52,17 +62,25 @@
class AdminCmdSlavedStore(
- SlavedFilteringStore,
- SlavedPushRuleStore,
- SlavedEventStore,
- SlavedDeviceStore,
+ FilteringWorkerStore,
+ DeviceWorkerStore,
TagsWorkerStore,
DeviceInboxWorkerStore,
AccountDataWorkerStore,
+ PushRulesWorkerStore,
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
- RegistrationWorkerStore,
+ RoomMemberWorkerStore,
+ RelationsWorkerStore,
+ EventFederationWorkerStore,
+ EventPushActionsWorkerStore,
+ StateGroupWorkerStore,
+ SignatureWorkerStore,
+ UserErasureWorkerStore,
ReceiptsWorkerStore,
+ StreamWorkerStore,
+ EventsWorkerStore,
+ RegistrationWorkerStore,
RoomWorkerStore,
):
def __init__(
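
With the `Slaved*` wrappers gone, the store is assembled directly from worker-store mixins, so their left-to-right order fixes the method resolution order. A self-contained illustration (hypothetical classes, not Synapse's stores) of why the ordering matters:

    class Base:
        def f(self) -> str:
            return "base"

    class A(Base):
        def f(self) -> str:
            return "a"

    class B(Base):
        pass

    class Combined(A, B):
        pass

    # Attribute lookup walks the MRO left to right, so A.f wins:
    assert Combined().f() == "a"
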
diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py
index b22f315453ba..8c0f4a57e70a 100644
--- a/synapse/app/complement_fork_starter.py
+++ b/synapse/app/complement_fork_starter.py
@@ -55,13 +55,13 @@
import signal
import sys
from types import FrameType
-from typing import Any, Callable, List, Optional
+from typing import Any, Callable, Dict, List, Optional
from twisted.internet.main import installReactor
# a list of the original signal handlers, before we installed our custom ones.
# We restore these in our child processes.
-_original_signal_handlers: dict[int, Any] = {}
+_original_signal_handlers: Dict[int, Any] = {}
class ProxiedReactor:
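
The `dict[int, Any]` to `Dict[int, Any]` change is a compatibility fix: module-level annotations are evaluated at runtime, and subscripting the built-in `dict` needs Python 3.9 or newer. For example:

    from typing import Any, Dict

    _handlers: Dict[int, Any] = {}  # works on older Pythons too

    # On Python 3.8 the following raises at import time
    # ("TypeError: 'type' object is not subscriptable"), because the
    # annotation expression is evaluated unless
    # `from __future__ import annotations` is in effect:
    #
    # _handlers: dict[int, Any] = {}
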
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 5e3825fca608..bcc8abe20c1f 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -14,21 +14,19 @@
# limitations under the License.
import logging
import sys
-from typing import Dict, List, Optional, Tuple
+from typing import Dict, List
-from twisted.internet import address
from twisted.web.resource import Resource
import synapse
import synapse.events
-from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.api.urls import (
CLIENT_API_PREFIX,
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_R0_PREFIX,
MEDIA_V3_PREFIX,
- SERVER_KEY_V2_PREFIX,
+ SERVER_KEY_PREFIX,
)
from synapse.app import _base
from synapse.app._base import (
@@ -43,52 +41,13 @@
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
-from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.http.site import SynapseRequest, SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
-from synapse.replication.slave.storage.devices import SlavedDeviceStore
-from synapse.replication.slave.storage.events import SlavedEventStore
-from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.keys import SlavedKeyStore
-from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
-from synapse.replication.slave.storage.pushers import SlavedPusherStore
+from synapse.rest import ClientRestResource
from synapse.rest.admin import register_servlets_for_media_repo
-from synapse.rest.client import (
- account_data,
- events,
- initial_sync,
- login,
- presence,
- profile,
- push_rule,
- read_marker,
- receipts,
- room,
- room_batch,
- room_keys,
- sendtodevice,
- sync,
- tags,
- user_directory,
- versions,
- voip,
-)
-from synapse.rest.client._base import client_patterns
-from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
-from synapse.rest.client.devices import DevicesRestServlet
-from synapse.rest.client.keys import (
- KeyChangesServlet,
- KeyQueryServlet,
- OneTimeKeyServlet,
-)
-from synapse.rest.client.register import (
- RegisterRestServlet,
- RegistrationTokenValidityRestServlet,
-)
from synapse.rest.health import HealthResource
-from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.rest.well_known import well_known_resource
from synapse.server import HomeServer
@@ -100,8 +59,16 @@
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore
+from synapse.storage.databases.main.devices import DeviceWorkerStore
from synapse.storage.databases.main.directory import DirectoryWorkerStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
+from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
+from synapse.storage.databases.main.event_push_actions import (
+ EventPushActionsWorkerStore,
+)
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+from synapse.storage.databases.main.filtering import FilteringWorkerStore
+from synapse.storage.databases.main.keys import KeyStore
from synapse.storage.databases.main.lock import LockStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.metrics import ServerMetricsStore
@@ -110,118 +77,31 @@
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
+from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
+from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
+from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.storage.databases.main.room_batch import RoomBatchStore
+from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
+from synapse.storage.databases.main.signatures import SignatureWorkerStore
+from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stats import StatsStore
+from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
-from synapse.types import JsonDict
+from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.util import SYNAPSE_VERSION
from synapse.util.httpresourcetree import create_resource_tree
logger = logging.getLogger("synapse.app.generic_worker")
-class KeyUploadServlet(RestServlet):
- """An implementation of the `KeyUploadServlet` that responds to read only
- requests, but otherwise proxies through to the master instance.
- """
-
- PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
-
- def __init__(self, hs: HomeServer):
- """
- Args:
- hs: server
- """
- super().__init__()
- self.auth = hs.get_auth()
- self.store = hs.get_datastores().main
- self.http_client = hs.get_simple_http_client()
- self.main_uri = hs.config.worker.worker_main_http_uri
-
- async def on_POST(
- self, request: SynapseRequest, device_id: Optional[str]
- ) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request, allow_guest=True)
- user_id = requester.user.to_string()
- body = parse_json_object_from_request(request)
-
- if device_id is not None:
- # passing the device_id here is deprecated; however, we allow it
- # for now for compatibility with older clients.
- if requester.device_id is not None and device_id != requester.device_id:
- logger.warning(
- "Client uploading keys for a different device "
- "(logged in as %s, uploading for %s)",
- requester.device_id,
- device_id,
- )
- else:
- device_id = requester.device_id
-
- if device_id is None:
- raise SynapseError(
- 400, "To upload keys, you must pass device_id when authenticating"
- )
-
- if body:
- # They're actually trying to upload something, proxy to main synapse.
-
- # Proxy headers from the original request, such as the auth headers
- # (in case the access token is there) and the original IP /
- # User-Agent of the request.
- headers = {
- header: request.requestHeaders.getRawHeaders(header, [])
- for header in (b"Authorization", b"User-Agent")
- }
- # Add the previous hop to the X-Forwarded-For header.
- x_forwarded_for = request.requestHeaders.getRawHeaders(
- b"X-Forwarded-For", []
- )
- # we use request.client here, since we want the previous hop, not the
- # original client (as returned by request.getClientAddress()).
- if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
- previous_host = request.client.host.encode("ascii")
- # If the header exists, add to the comma-separated list of the first
- # instance of the header. Otherwise, generate a new header.
- if x_forwarded_for:
- x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host]
- x_forwarded_for.extend(x_forwarded_for[1:])
- else:
- x_forwarded_for = [previous_host]
- headers[b"X-Forwarded-For"] = x_forwarded_for
-
- # Replicate the original X-Forwarded-Proto header. Note that
- # XForwardedForRequest overrides isSecure() to give us the original protocol
- # used by the client, as opposed to the protocol used by our upstream proxy
- # - which is what we want here.
- headers[b"X-Forwarded-Proto"] = [
- b"https" if request.isSecure() else b"http"
- ]
-
- try:
- result = await self.http_client.post_json_get_json(
- self.main_uri + request.uri.decode("ascii"), body, headers=headers
- )
- except HttpResponseException as e:
- raise e.to_synapse_error() from e
- except RequestSendFailed as e:
- raise SynapseError(502, "Failed to talk to master") from e
-
- return 200, result
- else:
- # Just interested in counts.
- result = await self.store.count_e2e_one_time_keys(user_id, device_id)
- return 200, {"one_time_key_counts": result}
-
-
class GenericWorkerSlavedStore(
# FIXME(#3714): We need to add UserDirectoryStore as we write directly
# rather than going via the correct worker.
@@ -231,26 +111,36 @@ class GenericWorkerSlavedStore(
EndToEndRoomKeyStore,
PresenceStore,
DeviceInboxWorkerStore,
- SlavedDeviceStore,
- SlavedPushRuleStore,
+ DeviceWorkerStore,
TagsWorkerStore,
AccountDataWorkerStore,
- SlavedPusherStore,
CensorEventsStore,
ClientIpWorkerStore,
- SlavedEventStore,
- SlavedKeyStore,
+ # KeyStore isn't really safe to use from a worker, but for now we do so and hope that
+ # the races it creates aren't too bad.
+ KeyStore,
RoomWorkerStore,
RoomBatchStore,
DirectoryWorkerStore,
+ PushRulesWorkerStore,
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
ProfileWorkerStore,
- SlavedFilteringStore,
+ FilteringWorkerStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,
ServerMetricsStore,
+ PusherWorkerStore,
+ RoomMemberWorkerStore,
+ RelationsWorkerStore,
+ EventFederationWorkerStore,
+ EventPushActionsWorkerStore,
+ StateGroupWorkerStore,
+ SignatureWorkerStore,
+ UserErasureWorkerStore,
ReceiptsWorkerStore,
+ StreamWorkerStore,
+ EventsWorkerStore,
RegistrationWorkerStore,
SearchStore,
TransactionWorkerStore,
@@ -267,15 +157,9 @@ class GenericWorkerServer(HomeServer):
DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore
def _listen_http(self, listener_config: ListenerConfig) -> None:
- port = listener_config.port
- bind_addresses = listener_config.bind_addresses
assert listener_config.http_options is not None
- site_tag = listener_config.http_options.tag
- if site_tag is None:
- site_tag = str(port)
-
# We always include a health resource.
resources: Dict[str, Resource] = {"/health": HealthResource()}
@@ -284,52 +168,15 @@ def _listen_http(self, listener_config: ListenerConfig) -> None:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
- resource = JsonResource(self, canonical_json=False)
-
- RegisterRestServlet(self).register(resource)
- RegistrationTokenValidityRestServlet(self).register(resource)
- login.register_servlets(self, resource)
- ThreepidRestServlet(self).register(resource)
- WhoamiRestServlet(self).register(resource)
- DevicesRestServlet(self).register(resource)
-
- # Read-only
- KeyUploadServlet(self).register(resource)
- KeyQueryServlet(self).register(resource)
- KeyChangesServlet(self).register(resource)
- OneTimeKeyServlet(self).register(resource)
-
- voip.register_servlets(self, resource)
- push_rule.register_servlets(self, resource)
- versions.register_servlets(self, resource)
-
- profile.register_servlets(self, resource)
+ resource: Resource = ClientRestResource(self)
- sync.register_servlets(self, resource)
- events.register_servlets(self, resource)
- room.register_servlets(self, resource, is_worker=True)
- room.register_deprecated_servlets(self, resource)
- initial_sync.register_servlets(self, resource)
- room_batch.register_servlets(self, resource)
- room_keys.register_servlets(self, resource)
- tags.register_servlets(self, resource)
- account_data.register_servlets(self, resource)
- receipts.register_servlets(self, resource)
- read_marker.register_servlets(self, resource)
-
- sendtodevice.register_servlets(self, resource)
-
- user_directory.register_servlets(self, resource)
-
- presence.register_servlets(self, resource)
-
- resources.update({CLIENT_API_PREFIX: resource})
+ resources[CLIENT_API_PREFIX] = resource
resources.update(build_synapse_client_resource_tree(self))
- resources.update({"/.well-known": well_known_resource(self)})
+ resources["/.well-known"] = well_known_resource(self)
elif name == "federation":
- resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
+ resources[FEDERATION_PREFIX] = TransportLayerServer(self)
elif name == "media":
if self.config.media.can_load_media_repo:
media_repo = self.get_media_repository_resource()
@@ -357,16 +204,12 @@ def _listen_http(self, listener_config: ListenerConfig) -> None:
# Only load the openid resource separately if federation resource
# is not specified since federation resource includes openid
# resource.
- resources.update(
- {
- FEDERATION_PREFIX: TransportLayerServer(
- self, servlet_groups=["openid"]
- )
- }
+ resources[FEDERATION_PREFIX] = TransportLayerServer(
+ self, servlet_groups=["openid"]
)
if name in ["keys", "federation"]:
- resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+ resources[SERVER_KEY_PREFIX] = KeyResource(self)
if name == "replication":
resources[REPLICATION_PREFIX] = ReplicationRestResource(self)
@@ -377,23 +220,15 @@ def _listen_http(self, listener_config: ListenerConfig) -> None:
root_resource = create_resource_tree(resources, OptionsResource())
- _base.listen_tcp(
- bind_addresses,
- port,
- SynapseSite(
- "synapse.access.http.%s" % (site_tag,),
- site_tag,
- listener_config,
- root_resource,
- self.version_string,
- max_request_body_size=max_request_body_size(self.config),
- reactor=self.get_reactor(),
- ),
+ _base.listen_http(
+ listener_config,
+ root_resource,
+ self.version_string,
+ max_request_body_size(self.config),
+ self.tls_server_context_factory,
reactor=self.get_reactor(),
)
- logger.info("Synapse worker now listening on port %d", port)
-
def start_listening(self) -> None:
for listener in self.config.worker.worker_listeners:
if listener.type == "http":
@@ -415,7 +250,6 @@ def start_listening(self) -> None:
_base.listen_metrics(
listener.bind_addresses,
listener.port,
- enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
)
else:
logger.warning("Unsupported listener type: %s", listener.type)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 883f2fd2ecd8..b9be558c7ea0 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -31,14 +31,13 @@
LEGACY_MEDIA_PREFIX,
MEDIA_R0_PREFIX,
MEDIA_V3_PREFIX,
- SERVER_KEY_V2_PREFIX,
+ SERVER_KEY_PREFIX,
STATIC_PREFIX,
)
from synapse.app import _base
from synapse.app._base import (
handle_startup_exception,
- listen_ssl,
- listen_tcp,
+ listen_http,
max_request_body_size,
redirect_stdio_to_logs,
register_start,
@@ -53,14 +52,13 @@
RootOptionsRedirectResource,
StaticResource,
)
-from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.rest import ClientRestResource
from synapse.rest.admin import AdminRestResource
from synapse.rest.health import HealthResource
-from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.rest.well_known import well_known_resource
from synapse.server import HomeServer
@@ -83,8 +81,6 @@ def _listener_http(
self, config: HomeServerConfig, listener_config: ListenerConfig
) -> Iterable[Port]:
port = listener_config.port
- bind_addresses = listener_config.bind_addresses
- tls = listener_config.tls
# Must exist since this is an HTTP listener.
assert listener_config.http_options is not None
site_tag = listener_config.http_options.tag
@@ -140,37 +136,15 @@ def _listener_http(
else:
root_resource = OptionsResource()
- site = SynapseSite(
- "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
- site_tag,
+ ports = listen_http(
listener_config,
create_resource_tree(resources, root_resource),
self.version_string,
- max_request_body_size=max_request_body_size(self.config),
+ max_request_body_size(self.config),
+ self.tls_server_context_factory,
reactor=self.get_reactor(),
)
- if tls:
- # refresh_certificate should have been called before this.
- assert self.tls_server_context_factory is not None
- ports = listen_ssl(
- bind_addresses,
- port,
- site,
- self.tls_server_context_factory,
- reactor=self.get_reactor(),
- )
- logger.info("Synapse now listening on TCP port %d (TLS)", port)
-
- else:
- ports = listen_tcp(
- bind_addresses,
- port,
- site,
- reactor=self.get_reactor(),
- )
- logger.info("Synapse now listening on TCP port %d", port)
-
return ports
def _configure_named_resource(
@@ -215,30 +189,22 @@ def _configure_named_resource(
consent_resource: Resource = ConsentResource(self)
if compress:
consent_resource = gz_wrap(consent_resource)
- resources.update({"/_matrix/consent": consent_resource})
+ resources["/_matrix/consent"] = consent_resource
if name == "federation":
federation_resource: Resource = TransportLayerServer(self)
if compress:
federation_resource = gz_wrap(federation_resource)
- resources.update({FEDERATION_PREFIX: federation_resource})
+ resources[FEDERATION_PREFIX] = federation_resource
if name == "openid":
- resources.update(
- {
- FEDERATION_PREFIX: TransportLayerServer(
- self, servlet_groups=["openid"]
- )
- }
+ resources[FEDERATION_PREFIX] = TransportLayerServer(
+ self, servlet_groups=["openid"]
)
if name in ["static", "client"]:
- resources.update(
- {
- STATIC_PREFIX: StaticResource(
- os.path.join(os.path.dirname(synapse.__file__), "static")
- )
- }
+ resources[STATIC_PREFIX] = StaticResource(
+ os.path.join(os.path.dirname(synapse.__file__), "static")
)
if name in ["media", "federation", "client"]:
@@ -257,7 +223,7 @@ def _configure_named_resource(
)
if name in ["keys", "federation"]:
- resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+ resources[SERVER_KEY_PREFIX] = KeyResource(self)
if name == "metrics" and self.config.metrics.enable_metrics:
metrics_resource: Resource = MetricsResource(RegistryProxy)
@@ -299,7 +265,6 @@ def start_listening(self) -> None:
_base.listen_metrics(
listener.bind_addresses,
listener.port,
- enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
)
else:
# this shouldn't happen, as the listener type should have been checked
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 0dfa00df44c7..65615f50b8be 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -32,9 +32,9 @@
logger = logging.getLogger(__name__)
-# Type for the `device_one_time_key_counts` field in an appservice transaction
+# Type for the `device_one_time_keys_count` field in an appservice transaction
# user ID -> {device ID -> {algorithm -> count}}
-TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]]
+TransactionOneTimeKeysCount = Dict[str, Dict[str, Dict[str, int]]]
# Type for the `device_unused_fallback_key_types` field in an appservice transaction
# user ID -> {device ID -> [algorithm]}
@@ -172,12 +172,24 @@ async def _matches_user_in_member_list(
Returns:
True if this service would like to know about this room.
"""
- member_list = await store.get_users_in_room(
+ # We can use `get_local_users_in_room(...)` here because an application service
+ # can only be interested in local users of the server it's on (ignore any remote
+ # users that might match the user namespace regex).
+ #
+ # In the future, we can consider re-using
+ # `store.get_app_service_users_in_room`, which is very similar to this
+ # function but performs slightly worse, because this function has an
+ # early escape-hatch once it finds a single user the appservice is
+ # interested in. The juice would be worth the squeeze if
+ # `store.get_app_service_users_in_room` were used in more places than
+ # one experimental MSC, but for now we avoid the extra work, since the
+ # result would barely be used.
+ local_user_ids = await store.get_local_users_in_room(
room_id, on_invalidate=cache_context.invalidate
)
# check joined member events
- for user_id in member_list:
+ for user_id in local_user_ids:
if self.is_interested_in_user(user_id):
return True
return False
@@ -233,7 +245,9 @@ async def is_interested_in_room(
return True
# likewise with the room's aliases (if it has any)
- alias_list = await store.get_aliases_for_room(room_id)
+ alias_list = await store.get_aliases_for_room(
+ room_id, on_invalidate=cache_context.invalidate
+ )
for alias in alias_list:
if self.is_room_alias_in_namespace(alias):
return True
@@ -299,7 +313,9 @@ async def is_interested_in_presence(
# Find all the rooms the sender is in
if self.is_interested_in_user(user_id.to_string()):
return True
- room_ids = await store.get_rooms_for_user(user_id.to_string())
+ room_ids = await store.get_rooms_for_user(
+ user_id.to_string(), on_invalidate=cache_context.invalidate
+ )
# Then find out if the appservice is interested in any of those rooms
for room_id in room_ids:
@@ -364,7 +380,7 @@ def __init__(
events: List[EventBase],
ephemeral: List[JsonDict],
to_device_messages: List[JsonDict],
- one_time_key_counts: TransactionOneTimeKeyCounts,
+ one_time_keys_count: TransactionOneTimeKeysCount,
unused_fallback_keys: TransactionUnusedFallbackKeys,
device_list_summary: DeviceListUpdates,
):
@@ -373,7 +389,7 @@ def __init__(
self.events = events
self.ephemeral = ephemeral
self.to_device_messages = to_device_messages
- self.one_time_key_counts = one_time_key_counts
+ self.one_time_keys_count = one_time_keys_count
self.unused_fallback_keys = unused_fallback_keys
self.device_list_summary = device_list_summary
@@ -390,7 +406,7 @@ async def send(self, as_api: "ApplicationServiceApi") -> bool:
events=self.events,
ephemeral=self.ephemeral,
to_device_messages=self.to_device_messages,
- one_time_key_counts=self.one_time_key_counts,
+ one_time_keys_count=self.one_time_keys_count,
unused_fallback_keys=self.unused_fallback_keys,
device_list_summary=self.device_list_summary,
txn_id=self.id,
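
The `on_invalidate=cache_context.invalidate` arguments added above chain invalidation between caches: when an inner cached lookup (rooms, aliases, members) is invalidated, the outer `@cached` appservice-interest result is evicted as well. A minimal self-contained sketch of that callback mechanism (illustrative, not Synapse's actual cache internals):

    from typing import Callable, Dict, List, Tuple

    class Cache:
        """Toy cache whose entries remember invalidation callbacks."""

        def __init__(self) -> None:
            self._entries: Dict[str, Tuple[object, List[Callable[[], None]]]] = {}

        def set(self, key: str, value: object) -> None:
            self._entries[key] = (value, [])

        def get(self, key: str, on_invalidate: Callable[[], None]) -> object:
            value, callbacks = self._entries[key]
            # Remember the caller so it can be evicted along with this entry.
            callbacks.append(on_invalidate)
            return value

        def invalidate(self, key: str) -> None:
            _, callbacks = self._entries.pop(key, (None, []))
            for callback in callbacks:
                callback()
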
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index fbac4375b096..edafd433cda3 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -23,7 +23,7 @@
from synapse.api.errors import CodeMessageException
from synapse.appservice import (
ApplicationService,
- TransactionOneTimeKeyCounts,
+ TransactionOneTimeKeysCount,
TransactionUnusedFallbackKeys,
)
from synapse.events import EventBase
@@ -123,7 +123,7 @@ async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
response = await self.get_json(
uri,
{"access_token": service.hs_token},
- headers={"Authorization": f"Bearer {service.hs_token}"},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
return True
@@ -147,7 +147,7 @@ async def query_alias(self, service: "ApplicationService", alias: str) -> bool:
response = await self.get_json(
uri,
{"access_token": service.hs_token},
- headers={"Authorization": f"Bearer {service.hs_token}"},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
return True
@@ -190,7 +190,9 @@ async def query_3pe(
b"access_token": service.hs_token,
}
response = await self.get_json(
- uri, args=args, headers={"Authorization": f"Bearer {service.hs_token}"}
+ uri,
+ args=args,
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if not isinstance(response, list):
logger.warning(
@@ -230,7 +232,7 @@ async def _get() -> Optional[JsonDict]:
info = await self.get_json(
uri,
{"access_token": service.hs_token},
- headers={"Authorization": f"Bearer {service.hs_token}"},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if not _is_valid_3pe_metadata(info):
@@ -260,7 +262,7 @@ async def push_bulk(
events: List[EventBase],
ephemeral: List[JsonDict],
to_device_messages: List[JsonDict],
- one_time_key_counts: TransactionOneTimeKeyCounts,
+ one_time_keys_count: TransactionOneTimeKeysCount,
unused_fallback_keys: TransactionUnusedFallbackKeys,
device_list_summary: DeviceListUpdates,
txn_id: Optional[int] = None,
@@ -308,10 +310,13 @@ async def push_bulk(
# TODO: Update to stable prefixes once MSC3202 completes FCP merge
if service.msc3202_transaction_extensions:
- if one_time_key_counts:
+ if one_time_keys_count:
body[
"org.matrix.msc3202.device_one_time_key_counts"
- ] = one_time_key_counts
+ ] = one_time_keys_count
+ body[
+ "org.matrix.msc3202.device_one_time_keys_count"
+ ] = one_time_keys_count
if unused_fallback_keys:
body[
"org.matrix.msc3202.device_unused_fallback_key_types"
@@ -327,7 +332,7 @@ async def push_bulk(
uri=uri,
json_body=body,
args={"access_token": service.hs_token},
- headers={"Authorization": f"Bearer {service.hs_token}"},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
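
The appservice requests now pass each header value as a list, matching the dict-of-lists shape used by Twisted's header machinery (HTTP permits multiple values per header name). For instance:

    from twisted.web.http_headers import Headers

    headers = Headers({b"Authorization": [b"Bearer <hs_token>"]})
    assert headers.getRawHeaders(b"Authorization") == [b"Bearer <hs_token>"]
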
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 430ffbcd1fc8..7b562795a3f2 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -64,7 +64,7 @@
from synapse.appservice import (
ApplicationService,
ApplicationServiceState,
- TransactionOneTimeKeyCounts,
+ TransactionOneTimeKeysCount,
TransactionUnusedFallbackKeys,
)
from synapse.appservice.api import ApplicationServiceApi
@@ -258,7 +258,7 @@ async def _send_request(self, service: ApplicationService) -> None:
):
return
- one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None
+ one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None
unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None
if (
@@ -269,7 +269,7 @@ async def _send_request(self, service: ApplicationService) -> None:
# for the users which are mentioned in this transaction,
# as well as the appservice's sender.
(
- one_time_key_counts,
+ one_time_keys_count,
unused_fallback_keys,
) = await self._compute_msc3202_otk_counts_and_fallback_keys(
service, events, ephemeral, to_device_messages_to_send
@@ -281,7 +281,7 @@ async def _send_request(self, service: ApplicationService) -> None:
events,
ephemeral,
to_device_messages_to_send,
- one_time_key_counts,
+ one_time_keys_count,
unused_fallback_keys,
device_list_summary,
)
@@ -296,7 +296,7 @@ async def _compute_msc3202_otk_counts_and_fallback_keys(
events: Iterable[EventBase],
ephemerals: Iterable[JsonDict],
to_device_messages: Iterable[JsonDict],
- ) -> Tuple[TransactionOneTimeKeyCounts, TransactionUnusedFallbackKeys]:
+ ) -> Tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]:
"""
Given a list of the events, ephemeral messages and to-device messages,
- first computes a list of application services users that may have
@@ -367,7 +367,7 @@ async def send(
events: List[EventBase],
ephemeral: Optional[List[JsonDict]] = None,
to_device_messages: Optional[List[JsonDict]] = None,
- one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None,
+ one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None,
unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None,
device_list_summary: Optional[DeviceListUpdates] = None,
) -> None:
@@ -380,7 +380,7 @@ async def send(
events: The persistent events to include in the transaction.
ephemeral: The ephemeral events to include in the transaction.
to_device_messages: The to-device messages to include in the transaction.
- one_time_key_counts: Counts of remaining one-time keys for relevant
+ one_time_keys_count: Counts of remaining one-time keys for relevant
appservice devices in the transaction.
unused_fallback_keys: Lists of unused fallback keys for relevant
appservice devices in the transaction.
@@ -397,7 +397,7 @@ async def send(
events=events,
ephemeral=ephemeral or [],
to_device_messages=to_device_messages or [],
- one_time_key_counts=one_time_key_counts or {},
+ one_time_keys_count=one_time_keys_count or {},
unused_fallback_keys=unused_fallback_keys or {},
device_list_summary=device_list_summary or DeviceListUpdates(),
)
diff --git a/synapse/config/_util.py b/synapse/config/_util.py
index 3edb4b71068f..d3a4b484abb8 100644
--- a/synapse/config/_util.py
+++ b/synapse/config/_util.py
@@ -33,6 +33,9 @@ def validate_config(
config: the configuration value to be validated
config_path: the path within the config file. This will be used as a basis
for the error message.
+
+ Raises:
+ ConfigError, if validation fails.
"""
try:
jsonschema.validate(config, json_schema)
diff --git a/synapse/config/api.py b/synapse/config/api.py
index e46728e73f0a..27d50d118f3f 100644
--- a/synapse/config/api.py
+++ b/synapse/config/api.py
@@ -13,12 +13,13 @@
# limitations under the License.
import logging
-from typing import Any, Iterable
+from typing import Any, Iterable, Optional, Tuple
from synapse.api.constants import EventTypes
from synapse.config._base import Config, ConfigError
from synapse.config._util import validate_config
from synapse.types import JsonDict
+from synapse.types.state import StateFilter
logger = logging.getLogger(__name__)
@@ -26,16 +27,20 @@
class ApiConfig(Config):
section = "api"
+ room_prejoin_state: StateFilter
+ track_puppetted_users_ips: bool
+
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
validate_config(_MAIN_SCHEMA, config, ())
- self.room_prejoin_state = list(self._get_prejoin_state_types(config))
+ self.room_prejoin_state = StateFilter.from_types(
+ self._get_prejoin_state_entries(config)
+ )
self.track_puppeted_user_ips = config.get("track_puppeted_user_ips", False)
- def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]:
- """Get the event types to include in the prejoin state
-
- Parses the config and returns an iterable of the event types to be included.
- """
+ def _get_prejoin_state_entries(
+ self, config: JsonDict
+ ) -> Iterable[Tuple[str, Optional[str]]]:
+ """Get the event types and state keys to include in the prejoin state."""
room_prejoin_state_config = config.get("room_prejoin_state") or {}
# backwards-compatibility support for room_invite_state_types
@@ -50,33 +55,39 @@ def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]:
logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING)
- yield from config["room_invite_state_types"]
+ for event_type in config["room_invite_state_types"]:
+ yield event_type, None
return
if not room_prejoin_state_config.get("disable_default_event_types"):
- yield from _DEFAULT_PREJOIN_STATE_TYPES
+ yield from _DEFAULT_PREJOIN_STATE_TYPES_AND_STATE_KEYS
- yield from room_prejoin_state_config.get("additional_event_types", [])
+ for entry in room_prejoin_state_config.get("additional_event_types", []):
+ if isinstance(entry, str):
+ yield entry, None
+ else:
+ yield entry
_ROOM_INVITE_STATE_TYPES_WARNING = """\
WARNING: The 'room_invite_state_types' configuration setting is now deprecated,
and replaced with 'room_prejoin_state'. New features may not work correctly
-unless 'room_invite_state_types' is removed. See the sample configuration file for
-details of 'room_prejoin_state'.
+unless 'room_invite_state_types' is removed. See the config documentation at
+ https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_prejoin_state
+for details of 'room_prejoin_state'.
--------------------------------------------------------------------------------
"""
-_DEFAULT_PREJOIN_STATE_TYPES = [
- EventTypes.JoinRules,
- EventTypes.CanonicalAlias,
- EventTypes.RoomAvatar,
- EventTypes.RoomEncryption,
- EventTypes.Name,
+_DEFAULT_PREJOIN_STATE_TYPES_AND_STATE_KEYS = [
+ (EventTypes.JoinRules, ""),
+ (EventTypes.CanonicalAlias, ""),
+ (EventTypes.RoomAvatar, ""),
+ (EventTypes.RoomEncryption, ""),
+ (EventTypes.Name, ""),
# Per MSC1772.
- EventTypes.Create,
+ (EventTypes.Create, ""),
# Per MSC3173.
- EventTypes.Topic,
+ (EventTypes.Topic, ""),
]
@@ -90,7 +101,17 @@ def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]:
"disable_default_event_types": {"type": "boolean"},
"additional_event_types": {
"type": "array",
- "items": {"type": "string"},
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 2,
+ "maxItems": 2,
+ },
+ ],
+ },
},
},
},
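
With this change, `additional_event_types` accepts either a bare event type, which matches any state key, or a two-element `[event_type, state_key]` pair, as the updated schema above encodes. A small example of the resulting `(type, state_key)` entries:

    entries = ["org.example.custom", ["m.room.member", "@user:example.org"]]

    parsed = [
        (entry, None) if isinstance(entry, str) else (entry[0], entry[1])
        for entry in entries
    ]
    # -> [("org.example.custom", None), ("m.room.member", "@user:example.org")]
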
diff --git a/synapse/config/cache.py b/synapse/config/cache.py
index 2db8cfb0052b..eb4194a5a91b 100644
--- a/synapse/config/cache.py
+++ b/synapse/config/cache.py
@@ -159,7 +159,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.track_memory_usage = cache_config.get("track_memory_usage", False)
if self.track_memory_usage:
- check_requirements("cache_memory")
+ check_requirements("cache-memory")
expire_caches = cache_config.get("expire_caches", True)
cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m")
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index c35301207ae4..573fa0386fa7 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -12,10 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any
+from typing import Any, Optional
import attr
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config._base import Config
from synapse.types import JsonDict
@@ -53,9 +54,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
# MSC3266 (room summary api)
self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)
- # MSC3030 (Jump to date API endpoint)
- self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False)
-
# MSC2409 (this setting only relates to optionally sending to-device messages).
# Presence, typing and read receipt EDUs are already sent to application services that
# have opted in to receive them. If enabled, this adds to-device messages to that list.
@@ -95,13 +93,11 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
# MSC2815 (allow room moderators to view redacted event content)
self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)
- # MSC3772: A push rule for mutual relations.
- self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False)
# MSC3773: Thread notifications
self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
- # MSC3715: dir param on /relations.
- self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)
+ # MSC3664: Pushrules to match on related events
+ self.msc3664_enabled: bool = experimental.get("msc3664_enabled", False)
# MSC3848: Introduce errcodes for specific event sending failures
self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False)
@@ -122,3 +118,21 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.msc3882_token_timeout = self.parse_duration(
experimental.get("msc3882_token_timeout", "5m")
)
+
+ # MSC3874: Filtering /messages with rel_types / not_rel_types.
+ self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False)
+
+ # MSC3886: Simple client rendezvous capability
+ self.msc3886_endpoint: Optional[str] = experimental.get(
+ "msc3886_endpoint", None
+ )
+
+ # MSC3912: Relation-based redactions.
+ self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)
+
+ # MSC1767 and friends: Extensible Events
+ self.msc1767_enabled: bool = experimental.get("msc1767_enabled", False)
+ if self.msc1767_enabled:
+ # Enable room version (and thus applicable push rules from MSC3931/3932)
+ version_id = RoomVersions.MSC1767v10.identifier
+ KNOWN_ROOM_VERSIONS[version_id] = RoomVersions.MSC1767v10
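
Registering the version in `KNOWN_ROOM_VERSIONS` is what makes it visible to the rest of the codebase; a sketch of the observable effect once the flag is enabled (assuming config parsing has run):

    from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

    # With `msc1767_enabled: true` under experimental_features, the
    # unstable room version is available alongside the stable ones:
    assert "org.matrix.msc1767.10" in KNOWN_ROOM_VERSIONS
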
diff --git a/synapse/config/groups.py b/synapse/config/groups.py
deleted file mode 100644
index baa051fdd47f..000000000000
--- a/synapse/config/groups.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2017 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Any
-
-from synapse.types import JsonDict
-
-from ._base import Config
-
-
-class GroupsConfig(Config):
- section = "groups"
-
- def read_config(self, config: JsonDict, **kwargs: Any) -> None:
- self.enable_group_creation = config.get("enable_group_creation", False)
- self.group_creation_prefix = config.get("group_creation_prefix", "")
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 6c1f78f8df89..5468b963a2c1 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -53,7 +53,7 @@
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
version: 1
@@ -317,15 +317,16 @@ def setup_logging(
Set up the logging subsystem.
Args:
- config (LoggingConfig | synapse.config.worker.WorkerConfig):
- configuration data
+ config: configuration data
- use_worker_options (bool): True to use the 'worker_log_config' option
+ use_worker_options: True to use the 'worker_log_config' option
instead of 'log_config'.
logBeginner: The Twisted logBeginner to use.
"""
+ from twisted.internet import reactor
+
log_config_path = (
config.worker.worker_log_config
if use_worker_options
@@ -348,3 +349,4 @@ def setup_logging(
)
logging.info("Server hostname: %s", config.server.server_name)
logging.info("Instance name: %s", hs.get_instance_name())
+ logging.info("Twisted reactor: %s", type(reactor).__name__)
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index bb065f9f2f14..8c1c9bd12d45 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -43,8 +43,6 @@ class MetricsConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_metrics = config.get("enable_metrics", False)
- self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)
-
self.report_stats = config.get("report_stats", None)
self.report_stats_endpoint = config.get(
"report_stats_endpoint", "https://matrix.org/report-usage-stats/push"
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index 5418a332da14..0bd83f40100b 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -123,6 +123,8 @@ def oidc_enabled(self) -> bool:
"userinfo_endpoint": {"type": "string"},
"jwks_uri": {"type": "string"},
"skip_verification": {"type": "boolean"},
+ "backchannel_logout_enabled": {"type": "boolean"},
+ "backchannel_logout_ignore_sub": {"type": "boolean"},
"user_profile_method": {
"type": "string",
"enum": ["auto", "userinfo_endpoint"],
@@ -292,6 +294,10 @@ def _parse_oidc_config_dict(
token_endpoint=oidc_config.get("token_endpoint"),
userinfo_endpoint=oidc_config.get("userinfo_endpoint"),
jwks_uri=oidc_config.get("jwks_uri"),
+ backchannel_logout_enabled=oidc_config.get("backchannel_logout_enabled", False),
+ backchannel_logout_ignore_sub=oidc_config.get(
+ "backchannel_logout_ignore_sub", False
+ ),
skip_verification=oidc_config.get("skip_verification", False),
user_profile_method=oidc_config.get("user_profile_method", "auto"),
allow_existing_users=oidc_config.get("allow_existing_users", False),
@@ -368,6 +374,12 @@ class OidcProviderConfig:
# "openid" scope is used.
jwks_uri: Optional[str]
+ # Whether Synapse should react to backchannel logouts
+ backchannel_logout_enabled: bool
+
+ # Whether Synapse should ignore the `sub` claim in backchannel logouts or not.
+ backchannel_logout_ignore_sub: bool
+
# Whether to skip metadata verification
skip_verification: bool
diff --git a/synapse/config/push.py b/synapse/config/push.py
index 979b128eae8f..3b5378e6ea52 100644
--- a/synapse/config/push.py
+++ b/synapse/config/push.py
@@ -26,6 +26,7 @@ class PushConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
push_config = config.get("push") or {}
self.push_include_content = push_config.get("include_content", True)
+ self.enable_push = push_config.get("enabled", True)
self.push_group_unread_count_by_room = push_config.get(
"group_unread_count_by_room", True
)
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 1ed001e10553..5c13fe428a70 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -150,8 +150,5 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.rc_third_party_invite = RatelimitSettings(
config.get("rc_third_party_invite", {}),
- defaults={
- "per_second": self.rc_message.per_second,
- "burst_count": self.rc_message.burst_count,
- },
+ defaults={"per_second": 0.0025, "burst_count": 5},
)
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 1033496bb43d..e4759711ed95 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -205,7 +205,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
)
self.url_preview_enabled = config.get("url_preview_enabled", False)
if self.url_preview_enabled:
- check_requirements("url_preview")
+ check_requirements("url-preview")
proxy_env = getproxies_environment()
if "url_preview_ip_range_blacklist" not in config:
diff --git a/synapse/config/server.py b/synapse/config/server.py
index f2353ce5fb0a..ec46ca63adf5 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -207,6 +207,9 @@ class HttpListenerConfig:
additional_resources: Dict[str, dict] = attr.Factory(dict)
tag: Optional[str] = None
request_id_header: Optional[str] = None
+ # If true, the listener will return CORS response headers compatible with MSC3886:
+ # https://github.com/matrix-org/matrix-spec-proposals/pull/3886
+ experimental_cors_msc3886: bool = False
@attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -935,6 +938,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
additional_resources=listener.get("additional_resources", {}),
tag=listener.get("tag"),
request_id_header=listener.get("request_id_header"),
+ experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
)
return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 0fb725dd8fc6..2580660b6c27 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -29,20 +29,6 @@
)
from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def
-_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
-The send_federation config option must be disabled in the main
-synapse process before they can be run in a separate worker.
-
-Please add ``send_federation: false`` to the main config
-"""
-
-_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR = """
-The start_pushers config option must be disabled in the main
-synapse process before they can be run in a separate worker.
-
-Please add ``start_pushers: false`` to the main config
-"""
-
_DEPRECATED_WORKER_DUTY_OPTION_USED = """
The '%s' configuration option is deprecated and will be removed in a future
Synapse version. Please use ``%s: name_of_worker`` instead.
@@ -67,6 +53,7 @@ class InstanceLocationConfig:
host: str
port: int
+ tls: bool = False
@attr.s
@@ -149,13 +136,25 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
# The port on the main synapse for HTTP replication endpoint
self.worker_replication_http_port = config.get("worker_replication_http_port")
+ # The tls mode on the main synapse for HTTP replication endpoint.
+ # For backward compatibility this defaults to False.
+ self.worker_replication_http_tls = config.get(
+ "worker_replication_http_tls", False
+ )
+
# The shared secret used for authentication when connecting to the main synapse.
self.worker_replication_secret = config.get("worker_replication_secret", None)
self.worker_name = config.get("worker_name", self.worker_app)
self.instance_name = self.worker_name or "master"
+ # FIXME: Remove this check after a suitable amount of time.
self.worker_main_http_uri = config.get("worker_main_http_uri", None)
+ if self.worker_main_http_uri is not None:
+ logger.warning(
+ "The config option worker_main_http_uri is unused since Synapse 1.73. "
+ "It can be safely removed from your configuration."
+ )
# This option is really only here to support `--manhole` command line
# argument.
@@ -169,40 +168,12 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
)
)
- # Handle federation sender configuration.
- #
- # There are two ways of configuring which instances handle federation
- # sending:
- # 1. The old way where "send_federation" is set to false and running a
- # `synapse.app.federation_sender` worker app.
- # 2. Specifying the workers sending federation in
- # `federation_sender_instances`.
- #
-
- send_federation = config.get("send_federation", True)
-
- federation_sender_instances = config.get("federation_sender_instances")
- if federation_sender_instances is None:
- # Default to an empty list, which means "another, unknown, worker is
- # responsible for it".
- federation_sender_instances = []
-
- # If no federation sender instances are set we check if
- # `send_federation` is set, which means use master
- if send_federation:
- federation_sender_instances = ["master"]
-
- if self.worker_app == "synapse.app.federation_sender":
- if send_federation:
- # If we're running federation senders, and not using
- # `federation_sender_instances`, then we should have
- # explicitly set `send_federation` to false.
- raise ConfigError(
- _FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR
- )
-
- federation_sender_instances = [self.worker_name]
-
+ federation_sender_instances = self._worker_names_performing_this_duty(
+ config,
+ "send_federation",
+ "synapse.app.federation_sender",
+ "federation_sender_instances",
+ )
self.send_federation = self.instance_name in federation_sender_instances
self.federation_shard_config = ShardedWorkerHandlingConfig(
federation_sender_instances
@@ -269,27 +240,12 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
)
# Handle sharded push
- start_pushers = config.get("start_pushers", True)
- pusher_instances = config.get("pusher_instances")
- if pusher_instances is None:
- # Default to an empty list, which means "another, unknown, worker is
- # responsible for it".
- pusher_instances = []
-
- # If no pushers instances are set we check if `start_pushers` is
- # set, which means use master
- if start_pushers:
- pusher_instances = ["master"]
-
- if self.worker_app == "synapse.app.pusher":
- if start_pushers:
- # If we're running pushers, and not using
- # `pusher_instances`, then we should have explicitly set
- # `start_pushers` to false.
- raise ConfigError(_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR)
-
- pusher_instances = [self.instance_name]
-
+ pusher_instances = self._worker_names_performing_this_duty(
+ config,
+ "start_pushers",
+ "synapse.app.pusher",
+ "pusher_instances",
+ )
self.start_pushers = self.instance_name in pusher_instances
self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
@@ -412,6 +368,64 @@ def _should_this_worker_perform_duty(
# (By this point, these are either the same value or only one is not None.)
return bool(new_option_should_run_here or legacy_option_should_run_here)
+ def _worker_names_performing_this_duty(
+ self,
+ config: Dict[str, Any],
+ legacy_option_name: str,
+ legacy_app_name: str,
+ modern_instance_list_name: str,
+ ) -> List[str]:
+ """
+ Retrieves the names of the workers handling a given duty, by either legacy
+ option or instance list.
+
+ There are two ways of configuring which instances handle a given duty, e.g.
+ for configuring pushers:
+
+ 1. The old way, where the legacy option (e.g. "start_pushers") is set to
+ false on the main process and a `synapse.app.pusher` worker app is run.
+ 2. Specifying the workers handling the duty in the modern instance list
+ (e.g. `pusher_instances`).
+
+ Args:
+ config: settings read from yaml.
+ legacy_option_name: the legacy boolean option, e.g. 'start_pushers'.
+ legacy_app_name: the legacy worker app name, e.g. 'synapse.app.pusher'.
+ modern_instance_list_name: the name of the modern instance-list option,
+ e.g. 'pusher_instances'.
+
+ Returns:
+ A list of worker instance names handling the given duty.
+ """
+
+ legacy_option = config.get(legacy_option_name, True)
+
+ worker_instances = config.get(modern_instance_list_name)
+ if worker_instances is None:
+ # Default to an empty list, which means "another, unknown, worker is
+ # responsible for it".
+ worker_instances = []
+
+ # If no worker instances are set we check if the legacy option
+ # is set, which means use the main process.
+ if legacy_option:
+ worker_instances = ["master"]
+
+ if self.worker_app == legacy_app_name:
+ if legacy_option:
+ # If we're using `legacy_app_name`, and not using
+ # `modern_instance_list_name`, then we should have
+ # explicitly set `legacy_option_name` to false.
+ raise ConfigError(
+ f"The '{legacy_option_name}' config option must be disabled in "
+ "the main synapse process before they can be run in a separate "
+ "worker.\n"
+ f"Please add `{legacy_option_name}: false` to the main config.\n",
+ )
+
+ worker_instances = [self.worker_name]
+
+ return worker_instances
+
def read_arguments(self, args: argparse.Namespace) -> None:
# We support a bunch of command line arguments that override options in
# the config. A lot of these options have a worker_* prefix when running
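
For concreteness, a few hypothetical configurations and how the helper above resolves them:

    cfg = {"start_pushers": False, "pusher_instances": ["pusher1"]}
    # -> ["pusher1"]: only that worker starts pushers.

    cfg = {}
    # -> ["master"]: the legacy option defaults to True, so the main
    #    process handles the duty.

    cfg = {"start_pushers": True}
    # -> ConfigError when worker_app is "synapse.app.pusher": the legacy
    #    option must first be disabled on the main process.
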
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index c88afb298620..69310d90351c 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -14,7 +14,6 @@
import abc
import logging
-import urllib
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple
import attr
@@ -213,7 +212,7 @@ async def verify_json_for_server(
def verify_json_objects_for_server(
self, server_and_json: Iterable[Tuple[str, dict, int]]
- ) -> List[defer.Deferred]:
+ ) -> List["defer.Deferred[None]"]:
"""Bulk verifies signatures of json objects, bulk fetching keys as
necessary.
@@ -226,10 +225,9 @@ def verify_json_objects_for_server(
valid.
Returns:
- List: for each input triplet, a deferred indicating success
- or failure to verify each json object's signature for the given
- server_name. The deferreds run their callbacks in the sentinel
- logcontext.
+ For each input triplet, a deferred indicating success or failure to
+ verify each json object's signature for the given server_name. The
+ deferreds run their callbacks in the sentinel logcontext.
"""
return [
run_in_background(
@@ -814,31 +812,27 @@ async def _fetch_keys(
results = {}
- async def get_key(key_to_fetch_item: _FetchKeyRequest) -> None:
+ async def get_keys(key_to_fetch_item: _FetchKeyRequest) -> None:
server_name = key_to_fetch_item.server_name
- key_ids = key_to_fetch_item.key_ids
try:
- keys = await self.get_server_verify_key_v2_direct(server_name, key_ids)
+ keys = await self.get_server_verify_keys_v2_direct(server_name)
results[server_name] = keys
except KeyLookupError as e:
- logger.warning(
- "Error looking up keys %s from %s: %s", key_ids, server_name, e
- )
+ logger.warning("Error looking up keys from %s: %s", server_name, e)
except Exception:
- logger.exception("Error getting keys %s from %s", key_ids, server_name)
+ logger.exception("Error getting keys from %s", server_name)
- await yieldable_gather_results(get_key, keys_to_fetch)
+ await yieldable_gather_results(get_keys, keys_to_fetch)
return results
- async def get_server_verify_key_v2_direct(
- self, server_name: str, key_ids: Iterable[str]
+ async def get_server_verify_keys_v2_direct(
+ self, server_name: str
) -> Dict[str, FetchKeyResult]:
"""
Args:
- server_name:
- key_ids:
+ server_name: Server to request keys from
Returns:
Map from key ID to lookup result
@@ -846,57 +840,41 @@ async def get_server_verify_key_v2_direct(
Raises:
KeyLookupError if there was a problem making the lookup
"""
- keys: Dict[str, FetchKeyResult] = {}
-
- for requested_key_id in key_ids:
- # we may have found this key as a side-effect of asking for another.
- if requested_key_id in keys:
- continue
-
- time_now_ms = self.clock.time_msec()
- try:
- response = await self.client.get_json(
- destination=server_name,
- path="/_matrix/key/v2/server/"
- + urllib.parse.quote(requested_key_id),
- ignore_backoff=True,
- # we only give the remote server 10s to respond. It should be an
- # easy request to handle, so if it doesn't reply within 10s, it's
- # probably not going to.
- #
- # Furthermore, when we are acting as a notary server, we cannot
- # wait all day for all of the origin servers, as the requesting
- # server will otherwise time out before we can respond.
- #
- # (Note that get_json may make 4 attempts, so this can still take
- # almost 45 seconds to fetch the headers, plus up to another 60s to
- # read the response).
- timeout=10000,
- )
- except (NotRetryingDestination, RequestSendFailed) as e:
- # these both have str() representations which we can't really improve
- # upon
- raise KeyLookupError(str(e))
- except HttpResponseException as e:
- raise KeyLookupError("Remote server returned an error: %s" % (e,))
-
- assert isinstance(response, dict)
- if response["server_name"] != server_name:
- raise KeyLookupError(
- "Expected a response for server %r not %r"
- % (server_name, response["server_name"])
- )
-
- response_keys = await self.process_v2_response(
- from_server=server_name,
- response_json=response,
- time_added_ms=time_now_ms,
+ time_now_ms = self.clock.time_msec()
+ try:
+ response = await self.client.get_json(
+ destination=server_name,
+ path="/_matrix/key/v2/server",
+ ignore_backoff=True,
+ # we only give the remote server 10s to respond. It should be an
+ # easy request to handle, so if it doesn't reply within 10s, it's
+ # probably not going to.
+ #
+ # Furthermore, when we are acting as a notary server, we cannot
+ # wait all day for all of the origin servers, as the requesting
+ # server will otherwise time out before we can respond.
+ #
+ # (Note that get_json may make 4 attempts, so this can still take
+ # almost 45 seconds to fetch the headers, plus up to another 60s to
+ # read the response).
+ timeout=10000,
)
- await self.store.store_server_verify_keys(
- server_name,
- time_now_ms,
- ((server_name, key_id, key) for key_id, key in response_keys.items()),
+ except (NotRetryingDestination, RequestSendFailed) as e:
+ # these both have str() representations which we can't really improve
+ # upon
+ raise KeyLookupError(str(e))
+ except HttpResponseException as e:
+ raise KeyLookupError("Remote server returned an error: %s" % (e,))
+
+ assert isinstance(response, dict)
+ if response["server_name"] != server_name:
+ raise KeyLookupError(
+ "Expected a response for server %r not %r"
+ % (server_name, response["server_name"])
)
- keys.update(response_keys)
- return keys
+ return await self.process_v2_response(
+ from_server=server_name,
+ response_json=response,
+ time_added_ms=time_now_ms,
+ )
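# A minimal sketch of the new fetch shape: a single request to
# `/_matrix/key/v2/server` returns every key the origin will vouch for, so
# the per-key_id loop is gone. `requests` is used purely for illustration;
# Synapse goes through its own federation HTTP client.
import requests

def fetch_all_server_keys(server_name: str) -> dict:
    # Note: no key ID in the path any more.
    resp = requests.get(f"https://{server_name}/_matrix/key/v2/server", timeout=10)
    resp.raise_for_status()
    body = resp.json()
    if body.get("server_name") != server_name:
        raise ValueError("response is for the wrong server")
    # Map of key ID -> key data, e.g. {"ed25519:abc123": {...}}.
    return body.get("verify_keys", {})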
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index c7d5ef92fc00..d437b7e5d1d2 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -15,7 +15,18 @@
import logging
import typing
-from typing import Any, Collection, Dict, Iterable, List, Optional, Set, Tuple, Union
+from typing import (
+ Any,
+ Collection,
+ Dict,
+ Iterable,
+ List,
+ Mapping,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+)
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
@@ -41,6 +52,7 @@
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
RoomVersion,
+ RoomVersions,
)
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import MutableStateMap, StateMap, UserID, get_domain_from_id
@@ -134,6 +146,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
async def check_state_independent_auth_rules(
store: _EventSourceStore,
event: "EventBase",
+ batched_auth_events: Optional[Mapping[str, "EventBase"]] = None,
) -> None:
"""Check that an event complies with auth rules that are independent of room state
@@ -143,6 +156,8 @@ async def check_state_independent_auth_rules(
Args:
store: the datastore; used to fetch the auth events for validation
event: the event being checked.
+ batched_auth_events: if the event being authed is part of a batch, any events
+ from the same batch that may be necessary to auth the current event
Raises:
AuthError if the checks fail
@@ -162,6 +177,9 @@ async def check_state_independent_auth_rules(
redact_behaviour=EventRedactBehaviour.as_is,
allow_rejected=True,
)
+ if batched_auth_events:
+ auth_events.update(batched_auth_events)
+
room_id = event.room_id
auth_dict: MutableStateMap[str] = {}
expected_auth_types = auth_types_for_event(event.room_version, event)
@@ -324,19 +342,80 @@ def check_state_dependent_auth_rules(
logger.debug("Allowing! %s", event)
+# Set of room versions where Synapse did not apply event key size limits
+# in bytes, but rather in codepoints.
+# In these room versions, we are more lenient with event size validation.
+LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = {
+ RoomVersions.V1,
+ RoomVersions.V2,
+ RoomVersions.V3,
+ RoomVersions.V4,
+ RoomVersions.V5,
+ RoomVersions.V6,
+ RoomVersions.MSC2176,
+ RoomVersions.V7,
+ RoomVersions.V8,
+ RoomVersions.V9,
+ RoomVersions.MSC3787,
+ RoomVersions.V10,
+ RoomVersions.MSC2716v4,
+ RoomVersions.MSC1767v10,
+}
+
+
def _check_size_limits(event: "EventBase") -> None:
+ """
+ Checks the size limits in a PDU.
+
+ The size limit of the PDU as a whole is checked first.
+ Then the sizes of individual fields are checked, first in codepoints and
+ then in bytes.
+
+ The codepoint size limits are only for Synapse compatibility.
+
+ Raises:
+ EventSizeError:
+ when a size limit has been violated.
+
+ unpersistable=True if Synapse never would have accepted the event and
+ the PDU must NOT be persisted.
+
+ unpersistable=False if a prior version of Synapse would have accepted the
+ event and so the PDU must be persisted as rejected to avoid
+ breaking the room.
+ """
+
+ # Whole PDU check
+ if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
+ raise EventSizeError("event too large", unpersistable=True)
+
+ # Codepoint size check: Synapse always enforced these limits, so apply
+ # them strictly.
if len(event.user_id) > 255:
- raise EventSizeError("'user_id' too large")
+ raise EventSizeError("'user_id' too large", unpersistable=True)
if len(event.room_id) > 255:
- raise EventSizeError("'room_id' too large")
+ raise EventSizeError("'room_id' too large", unpersistable=True)
if event.is_state() and len(event.state_key) > 255:
- raise EventSizeError("'state_key' too large")
+ raise EventSizeError("'state_key' too large", unpersistable=True)
if len(event.type) > 255:
- raise EventSizeError("'type' too large")
+ raise EventSizeError("'type' too large", unpersistable=True)
if len(event.event_id) > 255:
- raise EventSizeError("'event_id' too large")
- if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
- raise EventSizeError("event too large")
+ raise EventSizeError("'event_id' too large", unpersistable=True)
+
+ strict_byte_limits = (
+ event.room_version not in LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS
+ )
+
+ # Byte size check: if these fail, then be lenient to avoid breaking rooms.
+ if len(event.user_id.encode("utf-8")) > 255:
+ raise EventSizeError("'user_id' too large", unpersistable=strict_byte_limits)
+ if len(event.room_id.encode("utf-8")) > 255:
+ raise EventSizeError("'room_id' too large", unpersistable=strict_byte_limits)
+ if event.is_state() and len(event.state_key.encode("utf-8")) > 255:
+ raise EventSizeError("'state_key' too large", unpersistable=strict_byte_limits)
+ if len(event.type.encode("utf-8")) > 255:
+ raise EventSizeError("'type' too large", unpersistable=strict_byte_limits)
+ if len(event.event_id.encode("utf-8")) > 255:
+ raise EventSizeError("'event_id' too large", unpersistable=strict_byte_limits)
def _check_create(event: "EventBase") -> None:
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 030c3ca408c0..8aca9a3ab9e9 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -597,8 +597,7 @@ def _event_type_from_format_version(
format_version: The event format version
Returns:
- type: A type that can be initialized as per the initializer of
- `FrozenEvent`
+ A type that can be initialized as per the initializer of `FrozenEvent`
"""
if format_version == EventFormatVersions.ROOM_V1_V2:
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index e2ee10dd3ddc..94dd1298e177 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -28,8 +28,8 @@
from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
from synapse.state import StateHandler
from synapse.storage.databases.main import DataStore
-from synapse.storage.state import StateFilter
from synapse.types import EventID, JsonDict
+from synapse.types.state import StateFilter
from synapse.util import Clock
from synapse.util.stringutils import random_string
@@ -128,6 +128,7 @@ async def build(
state_filter=StateFilter.from_types(
auth_types_for_event(self.room_version, self)
),
+ await_full_state=False,
)
auth_event_ids = self._event_auth_handler.compute_auth_events(
self, state_ids
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index d3c8083e4ab8..6eaef8b57a03 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -23,7 +23,7 @@
if TYPE_CHECKING:
from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore
- from synapse.storage.state import StateFilter
+ from synapse.types.state import StateFilter
@attr.s(slots=True, auto_attribs=True)
@@ -65,7 +65,8 @@ class EventContext:
None does not necessarily mean that ``state_group`` does not have
a prev_group!
- If the event is a state event, this is normally the same as ``prev_group``.
+ If the event is a state event, this is normally the same as
+ ``state_group_before_event``.
If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
will always also be ``None``.
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 71853caad8c7..13fa93afb87b 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -28,8 +28,14 @@
)
import attr
+from canonicaljson import encode_canonical_json
-from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
+from synapse.api.constants import (
+ MAX_PDU_SIZE,
+ EventContentFields,
+ EventTypes,
+ RelationTypes,
+)
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict
@@ -674,3 +680,27 @@ def validate_canonicaljson(value: Any) -> None:
elif not isinstance(value, (bool, str)) and value is not None:
# Other potential JSON values (bool, None, str) are safe.
raise SynapseError(400, "Unknown JSON value", Codes.BAD_JSON)
+
+
+def maybe_upsert_event_field(
+ event: EventBase, container: JsonDict, key: str, value: object
+) -> bool:
+ """Upsert an event field, but only if this doesn't make the event too large.
+
+ Returns true iff the upsert took place.
+ """
+ if key in container:
+ old_value: object = container[key]
+ container[key] = value
+ # NB: here and below, we assume that passing a non-None `time_now` argument to
+ # get_pdu_json doesn't increase the size of the encoded result.
+ upsert_okay = len(encode_canonical_json(event.get_pdu_json())) <= MAX_PDU_SIZE
+ if not upsert_okay:
+ container[key] = old_value
+ else:
+ container[key] = value
+ upsert_okay = len(encode_canonical_json(event.get_pdu_json())) <= MAX_PDU_SIZE
+ if not upsert_okay:
+ del container[key]
+
+ return upsert_okay
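# A standalone sketch of the guard-and-rollback pattern above, using a plain
# dict and a toy size limit in place of an EventBase and MAX_PDU_SIZE.
import json

MAX_SIZE = 64  # toy limit, for illustration only

def maybe_upsert(container: dict, key: str, value: object) -> bool:
    had_key = key in container
    old_value = container.get(key)
    container[key] = value
    if len(json.dumps(container)) <= MAX_SIZE:
        return True
    # Roll back, so the container is never left oversized.
    if had_key:
        container[key] = old_value
    else:
        del container[key]
    return False

d = {"a": 1}
assert maybe_upsert(d, "b", "small")        # fits, so it is kept
assert not maybe_upsert(d, "c", "x" * 100)  # too big, so it is rolled back
assert "c" not in d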
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 4dca711cd28d..137cfb3346d2 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -80,6 +80,18 @@
T = TypeVar("T")
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class PulledPduInfo:
+ """
+ A result object that stores the PDU and info about it like which homeserver we
+ pulled it from (`pull_origin`)
+ """
+
+ pdu: EventBase
+ # Which homeserver we pulled the PDU from
+ pull_origin: str
+
+
class InvalidResponseError(RuntimeError):
"""Helper for _try_destination_list: indicates that the server returned a response
we couldn't parse
@@ -114,7 +126,9 @@ def __init__(self, hs: "HomeServer"):
self.hostname = hs.hostname
self.signing_key = hs.signing_key
- self._get_pdu_cache: ExpiringCache[str, EventBase] = ExpiringCache(
+ # Cache mapping `event_id` to a tuple of the event itself and the `pull_origin`
+ # (which server we pulled the event from)
+ self._get_pdu_cache: ExpiringCache[str, Tuple[EventBase, str]] = ExpiringCache(
cache_name="get_pdu_cache",
clock=self._clock,
max_len=1000,
@@ -352,11 +366,11 @@ async def _record_failure_callback(
@tag_args
async def get_pdu(
self,
- destinations: Iterable[str],
+ destinations: Collection[str],
event_id: str,
room_version: RoomVersion,
timeout: Optional[int] = None,
- ) -> Optional[EventBase]:
+ ) -> Optional[PulledPduInfo]:
"""Requests the PDU with given origin and ID from the remote home
servers.
@@ -371,11 +385,11 @@ async def get_pdu(
moving to the next destination. None indicates no timeout.
Returns:
- The requested PDU, or None if we were unable to find it.
+ The requested PDU wrapped in `PulledPduInfo`, or None if we were unable to find it.
"""
logger.debug(
- "get_pdu: event_id=%s from destinations=%s", event_id, destinations
+ "get_pdu(event_id=%s): from destinations=%s", event_id, destinations
)
# TODO: Rate limit the number of times we try and get the same event.
@@ -384,19 +398,25 @@ async def get_pdu(
# it gets persisted to the database), so we cache the results of the lookup.
# Note that this is separate to the regular get_event cache which caches
# events once they have been persisted.
- event = self._get_pdu_cache.get(event_id)
+ get_pdu_cache_entry = self._get_pdu_cache.get(event_id)
+ event = None
+ pull_origin = None
+ if get_pdu_cache_entry:
+ event, pull_origin = get_pdu_cache_entry
# If we don't see the event in the cache, go try to fetch it from the
# provided remote federated destinations
- if not event:
+ else:
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
+ # TODO: We can probably refactor this to use `_try_destination_list`
for destination in destinations:
now = self._clock.time_msec()
last_attempt = pdu_attempts.get(destination, 0)
if last_attempt + PDU_RETRY_TIME_MS > now:
logger.debug(
- "get_pdu: skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
+ "get_pdu(event_id=%s): skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
+ event_id,
destination,
last_attempt,
PDU_RETRY_TIME_MS,
@@ -411,43 +431,48 @@ async def get_pdu(
room_version=room_version,
timeout=timeout,
)
+ pull_origin = destination
pdu_attempts[destination] = now
if event:
# Prime the cache
- self._get_pdu_cache[event.event_id] = event
+ self._get_pdu_cache[event.event_id] = (event, pull_origin)
# Now that we have an event, we can break out of this
# loop and stop asking other destinations.
break
+ except NotRetryingDestination as e:
+ logger.info("get_pdu(event_id=%s): %s", event_id, e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "get_pdu(event_id=%s): Not attempting to fetch PDU from %s because the homeserver is not on our federation whitelist",
+ event_id,
+ destination,
+ )
+ continue
except SynapseError as e:
logger.info(
- "Failed to get PDU %s from %s because %s",
+ "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
event_id,
destination,
e,
)
continue
- except NotRetryingDestination as e:
- logger.info(str(e))
- continue
- except FederationDeniedError as e:
- logger.info(str(e))
- continue
except Exception as e:
pdu_attempts[destination] = now
logger.info(
- "Failed to get PDU %s from %s because %s",
+ "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
event_id,
destination,
e,
)
continue
- if not event:
+ if not event or not pull_origin:
return None
# `event` now refers to an object stored in `get_pdu_cache`. Our
@@ -459,7 +484,7 @@ async def get_pdu(
event.room_version,
)
- return event_copy
+ return PulledPduInfo(event_copy, pull_origin)
@trace
@tag_args
@@ -699,12 +724,14 @@ async def _check_sigs_and_hash_and_fetch_one(
pdu_origin = get_domain_from_id(pdu.sender)
if not res and pdu_origin != origin:
try:
- res = await self.get_pdu(
+ pulled_pdu_info = await self.get_pdu(
destinations=[pdu_origin],
event_id=pdu.event_id,
room_version=room_version,
timeout=10000,
)
+ if pulled_pdu_info is not None:
+ res = pulled_pdu_info.pdu
except SynapseError:
pass
@@ -744,17 +771,28 @@ def _is_unknown_endpoint(
"""
if synapse_error is None:
synapse_error = e.to_synapse_error()
- # There is no good way to detect an "unknown" endpoint.
- #
- # Dendrite returns a 404 (with a body of "404 page not found");
- # Conduit returns a 404 (with no body); and Synapse returns a 400
- # with M_UNRECOGNIZED.
+ # MSC3743 specifies that servers should return a 404 or 405 with an errcode
+ # of M_UNRECOGNIZED when they receive a request to an unknown endpoint or
+ # to an unknown method, respectively.
#
- # This needs to be rather specific as some endpoints truly do return 404
- # errors.
+ # Older versions of servers don't properly handle this. This needs to be
+ # rather specific as some endpoints truly do return 404 errors.
return (
- e.code == 404 and (not e.response or e.response == b"404 page not found")
- ) or (e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED)
+ # 404 is an unknown endpoint, 405 is a known endpoint, but unknown method.
+ (e.code == 404 or e.code == 405)
+ and (
+ # Older Dendrites returned a text or empty body.
+ # Older Conduit returned an empty body.
+ not e.response
+ or e.response == b"404 page not found"
+ # The proper response JSON with M_UNRECOGNIZED errcode.
+ or synapse_error.errcode == Codes.UNRECOGNIZED
+ )
+ ) or (
+ # Older Synapses returned a 400 error.
+ e.code == 400
+ and synapse_error.errcode == Codes.UNRECOGNIZED
+ )
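# A standalone sketch of the detection matrix above (illustrative names;
# the real check derives the errcode from the parsed Matrix error body):
from typing import Optional

def looks_like_unknown_endpoint(
    status: int, body: bytes, errcode: Optional[str]
) -> bool:
    if status in (404, 405):
        # MSC3743-compliant servers send M_UNRECOGNIZED; older Dendrite
        # sent a plain-text body, older Conduit an empty one.
        return (
            not body
            or body == b"404 page not found"
            or errcode == "M_UNRECOGNIZED"
        )
    # Older Synapses signalled an unknown endpoint with 400 + M_UNRECOGNIZED.
    return status == 400 and errcode == "M_UNRECOGNIZED"

assert looks_like_unknown_endpoint(405, b"", "M_UNRECOGNIZED")
assert not looks_like_unknown_endpoint(404, b'{"errcode":"M_NOT_FOUND"}', "M_NOT_FOUND")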
async def _try_destination_list(
self,
@@ -806,6 +844,7 @@ async def _try_destination_list(
)
for destination in destinations:
+ # We don't want to ask our own server for information we don't have
if destination == self.server_name:
continue
@@ -814,9 +853,21 @@ async def _try_destination_list(
except (
RequestSendFailed,
InvalidResponseError,
- NotRetryingDestination,
) as e:
logger.warning("Failed to %s via %s: %s", description, destination, e)
+ # Skip to the next homeserver in the list to try.
+ continue
+ except NotRetryingDestination as e:
+ logger.info("%s: %s", description, e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "%s: Not attempting to %s from %s because the homeserver is not on our federation whitelist",
+ description,
+ description,
+ destination,
+ )
+ continue
except UnsupportedRoomVersionError:
raise
except HttpResponseException as e:
@@ -1294,7 +1345,7 @@ async def _do_send_leave(self, destination: str, pdu: EventBase) -> JsonDict:
return resp[1]
async def send_knock(self, destinations: List[str], pdu: EventBase) -> JsonDict:
- """Attempts to send a knock event to given a list of servers. Iterates
+ """Attempts to send a knock event to a given list of servers. Iterates
through the list until one attempt succeeds.
Doing so will cause the remote server to add the event to the graph,
@@ -1609,6 +1660,64 @@ async def send_request(
return result
async def timestamp_to_event(
+ self, *, destinations: List[str], room_id: str, timestamp: int, direction: str
+ ) -> Optional["TimestampToEventResponse"]:
+ """
+ Calls each remote federating server from `destinations` asking for their closest
+ event to the given timestamp in the given direction until we get a response.
+ Also validates the response to always return the expected keys or raises an
+ error.
+
+ Args:
+ destinations: The domains of homeservers to try fetching from
+ room_id: Room to fetch the event from
+ timestamp: The point in time (inclusive) we should navigate from in
+ the given direction to find the closest event.
+ direction: ["f"|"b"] to indicate whether we should navigate forward
+ or backward from the given timestamp to find the closest event.
+
+ Returns:
+ A parsed TimestampToEventResponse including the closest event_id
+ and origin_server_ts or None if no destination has a response.
+ """
+
+ async def _timestamp_to_event_from_destination(
+ destination: str,
+ ) -> TimestampToEventResponse:
+ return await self._timestamp_to_event_from_destination(
+ destination, room_id, timestamp, direction
+ )
+
+ try:
+ # Loop through each homeserver candidate until we get a successful response
+ timestamp_to_event_response = await self._try_destination_list(
+ "timestamp_to_event",
+ destinations,
+ # TODO: The requested timestamp may lie in a part of the
+ # event graph that the remote server *also* didn't have,
+ # in which case they will have returned another event
+ # which may be nowhere near the requested timestamp. In
+ # the future, we may need to reconcile that gap and ask
+ # other homeservers, and/or extend `/timestamp_to_event`
+ # to return events on *both* sides of the timestamp to
+ # help reconcile the gap faster.
+ _timestamp_to_event_from_destination,
+ # Since this endpoint is new, we should try other servers before giving up.
+ # We can safely remove this in a year (remove after 2023-11-16).
+ failover_on_unknown_endpoint=True,
+ )
+ return timestamp_to_event_response
+ except SynapseError as e:
+ logger.warning(
+ "timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s",
+ room_id,
+ timestamp,
+ direction,
+ e,
+ )
+ return None
+
+ async def _timestamp_to_event_from_destination(
self, destination: str, room_id: str, timestamp: int, direction: str
) -> "TimestampToEventResponse":
"""
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 907940e19eb0..bb20af6e91ed 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -74,6 +74,8 @@
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.lock import Lock
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
+from synapse.storage.roommember import MemberSummary
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.util import json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
@@ -481,6 +483,14 @@ async def process_pdus_for_room(room_id: str) -> None:
pdu_results[pdu.event_id] = await process_pdu(pdu)
async def process_pdu(pdu: EventBase) -> JsonDict:
+ """
+ Processes a pushed PDU sent to us via a `/send` transaction
+
+ Returns:
+ JsonDict representing a "PDU Processing Result" that will be bundled up
+ with the other processed PDUs in the `/send` transaction and sent back
+ to the remote homeserver.
+ """
event_id = pdu.event_id
with nested_logging_context(event_id):
try:
@@ -683,8 +693,9 @@ async def on_send_join_request(
state_event_ids: Collection[str]
servers_in_room: Optional[Collection[str]]
if caller_supports_partial_state:
+ summary = await self.store.get_room_summary(room_id)
state_event_ids = _get_event_ids_for_partial_state_join(
- event, prev_state_ids
+ event, prev_state_ids, summary
)
servers_in_room = await self.state.get_hosts_in_room_at_events(
room_id, event_ids=event.prev_event_ids()
@@ -824,7 +835,14 @@ async def on_send_knock_request(
context, self._room_prejoin_state_types
)
)
- return {"knock_state_events": stripped_room_state}
+ return {
+ "knock_room_state": stripped_room_state,
+ # Since v1.37, Synapse incorrectly used "knock_state_events" for this field.
+ # Thus, we also populate a 'knock_state_events' with the same content to
+ # support old instances.
+ # See https://github.com/matrix-org/synapse/issues/14088.
+ "knock_state_events": stripped_room_state,
+ }
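# A sketch of the matching read side (illustrative; Synapse's own reader
# lives in the client-side federation code): prefer the correct key and
# fall back to the misspelt legacy one.
def read_knock_state(response: dict) -> list:
    return response.get("knock_room_state", response.get("knock_state_events", []))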
async def _on_send_membership_event(
self, origin: str, content: JsonDict, membership_type: str, room_id: str
@@ -1480,6 +1498,7 @@ async def on_query(self, query_type: str, args: dict) -> JsonDict:
def _get_event_ids_for_partial_state_join(
join_event: EventBase,
prev_state_ids: StateMap[str],
+ summary: Dict[str, MemberSummary],
) -> Collection[str]:
"""Calculate state to be retuned in a partial_state send_join
@@ -1506,8 +1525,19 @@ def _get_event_ids_for_partial_state_join(
if current_membership_event_id is not None:
state_event_ids.add(current_membership_event_id)
- # TODO: return a few more members:
- # - those with invites
- # - those that are kicked? / banned
+ name_id = prev_state_ids.get((EventTypes.Name, ""))
+ canonical_alias_id = prev_state_ids.get((EventTypes.CanonicalAlias, ""))
+ if not name_id and not canonical_alias_id:
+ # Also include the hero members of the room (for DM rooms without a title).
+ # To do this properly, we should select the correct subset of membership events
+ # from `prev_state_ids`. Instead, we are lazier and use the (cached)
+ # `get_room_summary` function, which is based on the current state of the room.
+ # This introduces races; we choose to ignore them because a) they should be rare
+ # and b) even if it's wrong, joining servers will get the full state eventually.
+ heroes = extract_heroes_from_room_summary(summary, join_event.state_key)
+ for hero in heroes:
+ membership_event_id = prev_state_ids.get((EventTypes.Member, hero))
+ if membership_event_id:
+ state_event_ids.add(membership_event_id)
return state_event_ids
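# A rough sketch of hero selection for an unnamed room, mirroring (but not
# reproducing) extract_heroes_from_room_summary: pick a few members other
# than the joining user so that a DM-style room title can be rendered.
from typing import List

def pick_heroes(members: List[str], joining_user: str, limit: int = 5) -> List[str]:
    return [m for m in members if m != joining_user][:limit]

assert pick_heroes(["@a:x", "@b:x", "@me:x"], "@me:x") == ["@a:x", "@b:x"]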
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index a6cb3ba58f0b..30ebd62883b1 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -353,21 +353,25 @@ async def _process_event_queue_loop(self) -> None:
last_token = await self.store.get_federation_out_pos("events")
(
next_token,
- events,
event_to_received_ts,
- ) = await self.store.get_all_new_events_stream(
+ ) = await self.store.get_all_new_event_ids_stream(
last_token, self._last_poked_id, limit=100
)
+ event_ids = event_to_received_ts.keys()
+ event_entries = await self.store.get_unredacted_events_from_cache_or_db(
+ event_ids
+ )
+
logger.debug(
"Handling %i -> %i: %i events to send (current id %i)",
last_token,
next_token,
- len(events),
+ len(event_entries),
self._last_poked_id,
)
- if not events and next_token >= self._last_poked_id:
+ if not event_entries and next_token >= self._last_poked_id:
logger.debug("All events processed")
break
@@ -430,7 +434,23 @@ async def handle_event(event: EventBase) -> None:
# If there are no prev event IDs then the state is empty
# and so no remote servers in the room
destinations = set()
- else:
+
+ if destinations is None:
+ # During partial join we use the set of servers that we got
+ # when beginning the join. It's still possible that we send
+ # events to servers that left the room in the meantime, but
+ # we consider that an acceptable risk since it is only our own
+ # events that we leak and not other server's ones.
+ partial_state_destinations = (
+ await self.store.get_partial_state_servers_at_join(
+ event.room_id
+ )
+ )
+
+ if len(partial_state_destinations) > 0:
+ destinations = partial_state_destinations
+
+ if destinations is None:
# We check the external cache for the destinations, which is
# stored per state group.
@@ -508,8 +528,14 @@ async def handle_room_events(events: List[EventBase]) -> None:
await handle_event(event)
events_by_room: Dict[str, List[EventBase]] = {}
- for event in events:
- events_by_room.setdefault(event.room_id, []).append(event)
+
+ for event_id in event_ids:
+ # `event_entries` is unsorted, so we have to iterate over `event_ids`
+ # to ensure the events are in the right order
+ event_cache = event_entries.get(event_id)
+ if event_cache:
+ event = event_cache.event
+ events_by_room.setdefault(event.room_id, []).append(event)
await make_deferred_yieldable(
defer.gatherResults(
@@ -524,9 +550,9 @@ async def handle_room_events(events: List[EventBase]) -> None:
logger.debug("Successfully handled up to %i", next_token)
await self.store.update_federation_out_pos("events", next_token)
- if events:
+ if event_entries:
now = self.clock.time_msec()
- ts = event_to_received_ts[events[-1].event_id]
+ ts = max(t for t in event_to_received_ts.values() if t)
assert ts is not None
synapse.metrics.event_processing_lag.labels(
@@ -536,7 +562,7 @@ async def handle_room_events(events: List[EventBase]) -> None:
"federation_sender"
).set(ts)
- events_processed_counter.inc(len(events))
+ events_processed_counter.inc(len(event_entries))
event_processing_loop_room_count.labels("federation_sender").inc(
len(events_by_room)
@@ -621,7 +647,7 @@ async def send_read_receipt(self, receipt: ReadReceipt) -> None:
room_id = receipt.room_id
# Work out which remote servers should be poked and poke them.
- domains_set = await self._storage_controllers.state.get_current_hosts_in_room(
+ domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
domains = [
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 084c45a95ca1..ffc9d95ee703 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -35,7 +35,7 @@
from synapse.logging.opentracing import SynapseTags, set_tag
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import ReadReceipt
+from synapse.types import JsonDict, ReadReceipt
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
from synapse.visibility import filter_events_for_server
@@ -136,8 +136,11 @@ def __init__(
# destination
self._pending_presence: Dict[str, UserPresenceState] = {}
- # room_id -> receipt_type -> user_id -> receipt_dict
- self._pending_rrs: Dict[str, Dict[str, Dict[str, dict]]] = {}
+ # List of room_id -> receipt_type -> user_id -> receipt_dict,
+ #
+ # Each EDU can only hold a single receipt per
+ # (room ID, receipt type, user ID, thread ID) tuple.
+ self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = []
self._rrs_pending_flush = False
# stream_id of last successfully sent to-device message.
@@ -202,17 +205,53 @@ def queue_read_receipt(self, receipt: ReadReceipt) -> None:
Args:
receipt: receipt to be queued
"""
- self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
- receipt.receipt_type, {}
- )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}
+ serialized_receipt: JsonDict = {
+ "event_ids": receipt.event_ids,
+ "data": receipt.data,
+ }
+ if receipt.thread_id is not None:
+ serialized_receipt["data"]["thread_id"] = receipt.thread_id
+
+ # Find which EDU to add this receipt to. There are three situations depending
+ # on the (room ID, receipt type, user, thread ID) tuple:
+ #
+ # 1. If it fully matches, clobber the information.
+ # 2. If it is missing, add the information.
+ # 3. If the subset tuple of (room ID, receipt type, user) matches, check
+ # the next EDU (or add a new EDU).
+ for edu in self._pending_receipt_edus:
+ receipt_content = edu.setdefault(receipt.room_id, {}).setdefault(
+ receipt.receipt_type, {}
+ )
+ # If this room ID, receipt type, user ID is not in this EDU, OR if
+ # the full tuple matches, use the current EDU.
+ if (
+ receipt.user_id not in receipt_content
+ or receipt_content[receipt.user_id].get("thread_id")
+ == receipt.thread_id
+ ):
+ receipt_content[receipt.user_id] = serialized_receipt
+ break
+
+ # If no matching EDU was found, create a new one.
+ else:
+ self._pending_receipt_edus.append(
+ {
+ receipt.room_id: {
+ receipt.receipt_type: {receipt.user_id: serialized_receipt}
+ }
+ }
+ )
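# A standalone sketch of the bucketing rule: a receipt may land in an EDU
# only if that EDU has no entry for the same (room, type, user) with a
# *different* thread ID; otherwise it spills into a later (or new) EDU.
from typing import Dict, List, Optional, Tuple

Receipt = Tuple[str, str, str, Optional[str]]  # room, type, user, thread

def queue_receipt(edus: List[Dict[Receipt, str]], r: Receipt, event_id: str) -> None:
    room, rtype, user, thread = r
    for edu in edus:
        clash = any(k[:3] == (room, rtype, user) and k[3] != thread for k in edu)
        if not clash:
            edu[r] = event_id  # cases 1 and 2: clobber, or add
            return
    edus.append({r: event_id})  # case 3: a fresh EDU

edus: List[Dict[Receipt, str]] = []
queue_receipt(edus, ("!r:x", "m.read", "@u:x", None), "$e1")
queue_receipt(edus, ("!r:x", "m.read", "@u:x", "$t"), "$e2")  # different thread
assert len(edus) == 2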
def flush_read_receipts_for_room(self, room_id: str) -> None:
- # if we don't have any read-receipts for this room, it may be that we've already
- # sent them out, so we don't need to flush.
- if room_id not in self._pending_rrs:
- return
- self._rrs_pending_flush = True
- self.attempt_new_transaction()
+ # If there are any pending receipts for this room then force-flush them
+ # in a new transaction.
+ for edu in self._pending_receipt_edus:
+ if room_id in edu:
+ self._rrs_pending_flush = True
+ self.attempt_new_transaction()
+ # No use in checking remaining EDUs if the room was found.
+ break
def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
self._pending_edus_keyed[(edu.edu_type, key)] = edu
@@ -351,7 +390,7 @@ async def _transaction_transmission_loop(self) -> None:
self._pending_edus = []
self._pending_edus_keyed = {}
self._pending_presence = {}
- self._pending_rrs = {}
+ self._pending_receipt_edus = []
self._start_catching_up()
except FederationDeniedError as e:
@@ -505,6 +544,7 @@ async def _catch_up_transmission_loop(self) -> None:
new_pdus = await filter_events_for_server(
self._storage_controllers,
self._destination,
+ self._server_name,
new_pdus,
redact=False,
)
@@ -542,22 +582,27 @@ async def _catch_up_transmission_loop(self) -> None:
self._destination, last_successful_stream_ordering
)
- def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
- if not self._pending_rrs:
+ def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]:
+ if not self._pending_receipt_edus:
return
if not force_flush and not self._rrs_pending_flush:
# not yet time for this lot
return
- edu = Edu(
- origin=self._server_name,
- destination=self._destination,
- edu_type=EduTypes.RECEIPT,
- content=self._pending_rrs,
- )
- self._pending_rrs = {}
- self._rrs_pending_flush = False
- yield edu
+ # Send at most `limit` EDUs for receipts.
+ for content in self._pending_receipt_edus[:limit]:
+ yield Edu(
+ origin=self._server_name,
+ destination=self._destination,
+ edu_type=EduTypes.RECEIPT,
+ content=content,
+ )
+ self._pending_receipt_edus = self._pending_receipt_edus[limit:]
+
+ # If there are still pending read-receipts, don't reset the pending flush
+ # flag.
+ if not self._pending_receipt_edus:
+ self._rrs_pending_flush = False
def _pop_pending_edus(self, limit: int) -> List[Edu]:
pending_edus = self._pending_edus
@@ -596,7 +641,7 @@ async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]
if not message_id:
continue
- set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)
+ set_tag(SynapseTags.TO_DEVICE_EDU_ID, message_id)
edus = [
Edu(
@@ -644,27 +689,61 @@ class _TransactionQueueManager:
async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
# First we calculate the EDUs we want to send, if any.
- # We start by fetching device related EDUs, i.e device updates and to
- # device messages. We have to keep 2 free slots for presence and rr_edus.
- device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2
+ # There's a maximum number of EDUs that can be sent with a transaction;
+ # generally device updates and to-device messages get priority, but we
+ # want to ensure that there's room for some other EDUs as well.
+ #
+ # This is done by:
+ #
+ # * Add a presence EDU, if one exists.
+ # * Add up to a small limit of read receipt EDUs.
+ # * Add to-device EDUs, but leave some space for device list updates.
+ # * Add device list updates EDUs.
+ # * If there's any remaining room, add other EDUs.
+ pending_edus = []
+
+ # Add presence EDU.
+ if self.queue._pending_presence:
+ pending_edus.append(
+ Edu(
+ origin=self.queue._server_name,
+ destination=self.queue._destination,
+ edu_type=EduTypes.PRESENCE,
+ content={
+ "push": [
+ format_user_presence_state(
+ presence, self.queue._clock.time_msec()
+ )
+ for presence in self.queue._pending_presence.values()
+ ]
+ },
+ )
+ )
+ self.queue._pending_presence = {}
- # We prioritize to-device messages so that existing encryption channels
+ # Add read receipt EDUs.
+ pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
+ edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus)
+
+ # Next, prioritize to-device messages so that existing encryption channels
# work. We also keep a few slots spare (by reducing the limit) so that
# we can still trickle out some device list updates.
(
to_device_edus,
device_stream_id,
- ) = await self.queue._get_to_device_message_edus(device_edu_limit - 10)
+ ) = await self.queue._get_to_device_message_edus(edu_limit - 10)
if to_device_edus:
self._device_stream_id = device_stream_id
else:
self.queue._last_device_stream_id = device_stream_id
- device_edu_limit -= len(to_device_edus)
+ pending_edus.extend(to_device_edus)
+ edu_limit -= len(to_device_edus)
+ # Add device list update EDUs.
device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
- device_edu_limit
+ edu_limit
)
if device_update_edus:
@@ -672,40 +751,17 @@ async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
else:
self.queue._last_device_list_stream_id = dev_list_id
- pending_edus = device_update_edus + to_device_edus
-
- # Now add the read receipt EDU.
- pending_edus.extend(self.queue._get_rr_edus(force_flush=False))
-
- # And presence EDU.
- if self.queue._pending_presence:
- pending_edus.append(
- Edu(
- origin=self.queue._server_name,
- destination=self.queue._destination,
- edu_type=EduTypes.PRESENCE,
- content={
- "push": [
- format_user_presence_state(
- presence, self.queue._clock.time_msec()
- )
- for presence in self.queue._pending_presence.values()
- ]
- },
- )
- )
- self.queue._pending_presence = {}
+ pending_edus.extend(device_update_edus)
+ edu_limit -= len(device_update_edus)
# Finally add any other types of EDUs if there is room.
- pending_edus.extend(
- self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
- )
- while (
- len(pending_edus) < MAX_EDUS_PER_TRANSACTION
- and self.queue._pending_edus_keyed
- ):
+ other_edus = self.queue._pop_pending_edus(edu_limit)
+ pending_edus.extend(other_edus)
+ edu_limit -= len(other_edus)
+ while edu_limit > 0 and self.queue._pending_edus_keyed:
_, val = self.queue._pending_edus_keyed.popitem()
pending_edus.append(val)
+ edu_limit -= 1
# Now we look for any PDUs to send, by getting up to 50 PDUs from the
# queue
@@ -716,8 +772,10 @@ async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
# if we've decided to send a transaction anyway, and we have room, we
# may as well send any pending RRs
- if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
- pending_edus.extend(self.queue._get_rr_edus(force_flush=True))
+ if edu_limit:
+ pending_edus.extend(
+ self.queue._get_receipt_edus(force_flush=True, limit=edu_limit)
+ )
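# Back-of-envelope sketch of the budget, assuming MAX_EDUS_PER_TRANSACTION
# is 100 (the module's constant) and 40 queued to-device EDUs:
MAX_EDUS_PER_TRANSACTION = 100
edu_limit = MAX_EDUS_PER_TRANSACTION - 1 - 5  # 1 presence + up to 5 receipts
to_device = min(40, edu_limit - 10)           # hold 10 back for device lists
edu_limit -= to_device
assert edu_limit == 54  # left for device lists, keyed and other EDUs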
if self._pdus:
self._last_stream_ordering = self._pdus[
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index cd39d4d1113a..77f1f39cacb1 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -185,9 +185,8 @@ async def timestamp_to_event(
Raises:
Various exceptions when the request fails
"""
- path = _create_path(
- FEDERATION_UNSTABLE_PREFIX,
- "/org.matrix.msc3030/timestamp_to_event/%s",
+ path = _create_v1_path(
+ "/timestamp_to_event/%s",
room_id,
)
@@ -280,12 +279,11 @@ async def make_membership_event(
Note that this does not append any events to any graphs.
Args:
- destination (str): address of remote homeserver
- room_id (str): room to join/leave
- user_id (str): user to be joined/left
- membership (str): one of join/leave
- params (dict[str, str|Iterable[str]]): Query parameters to include in the
- request.
+ destination: address of remote homeserver
+ room_id: room to join/leave
+ user_id: user to be joined/left
+ membership: one of join/leave
+ params: Query parameters to include in the request.
Returns:
Succeeds when we get a 2xx HTTP response. The result
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index 50623cd38513..2725f53cf6d9 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -25,7 +25,6 @@
from synapse.federation.transport.server.federation import (
FEDERATION_SERVLET_CLASSES,
FederationAccountStatusServlet,
- FederationTimestampLookupServlet,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
@@ -291,13 +290,6 @@ def register_servlets(
)
for servletclass in SERVLET_GROUPS[servlet_group]:
- # Only allow the `/timestamp_to_event` servlet if msc3030 is enabled
- if (
- servletclass == FederationTimestampLookupServlet
- and not hs.config.experimental.msc3030_enabled
- ):
- continue
-
# Only allow the `/account_status` servlet if msc3720 is enabled
if (
servletclass == FederationAccountStatusServlet
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index 1db8009d6ccf..cdaf0d5de782 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -224,10 +224,10 @@ class BaseFederationServlet:
With arguments:
- origin (unicode|None): The authenticated server_name of the calling server,
+ origin (str|None): The authenticated server_name of the calling server,
unless REQUIRE_AUTH is set to False and authentication failed.
- content (unicode|None): decoded json body of the request. None if the
+ content (str|None): decoded json body of the request. None if the
request was a GET.
query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index 6bb4659c4c9e..53e77b4bb62b 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -218,14 +218,13 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
`dir` can be `f` or `b` to indicate forwards and backwards in time from the
given timestamp.
- GET /_matrix/federation/unstable/org.matrix.msc3030/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
+ GET /_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
{
"event_id": ...
}
"""
PATH = "/timestamp_to_event/(?P[^/]*)/?"
- PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3030"
async def on_GET(
self,
@@ -489,7 +488,7 @@ async def on_PUT(
room_version = content["room_version"]
event = content["event"]
- invite_room_state = content["invite_room_state"]
+ invite_room_state = content.get("invite_room_state", [])
# Synapse expects invite_room_state to be in unsigned, as it is in v1
# API
@@ -499,6 +498,11 @@ async def on_PUT(
result = await self.handler.on_invite_request(
origin, event, room_version_id=room_version
)
+
+ # We only store invite_room_state for internal use, so remove it before
+ # returning the event to the remote homeserver.
+ result["event"].get("unsigned", {}).pop("invite_room_state", None)
+
return 200, result
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 0478448b47ea..fc21d5800159 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -225,7 +225,7 @@ async def get_new_events(
self,
user: UserID,
from_key: int,
- limit: Optional[int],
+ limit: int,
room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index f2989cc4a214..5bf8e863875b 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -100,6 +100,7 @@ async def get_user(self, user: UserID) -> Optional[JsonDict]:
user_info_dict["avatar_url"] = profile.avatar_url
user_info_dict["threepids"] = threepids
user_info_dict["external_ids"] = external_ids
+ user_info_dict["erased"] = await self.store.is_user_erased(user.to_string())
return user_info_dict
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 203b62e0151b..5d1d21cdc8c6 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -109,10 +109,13 @@ async def _notify_interested_services(self, max_token: RoomStreamToken) -> None:
last_token = await self.store.get_appservice_last_pos()
(
upper_bound,
- events,
event_to_received_ts,
- ) = await self.store.get_all_new_events_stream(
- last_token, self.current_max, limit=100, get_prev_content=True
+ ) = await self.store.get_all_new_event_ids_stream(
+ last_token, self.current_max, limit=100
+ )
+
+ events = await self.store.get_events_as_list(
+ event_to_received_ts.keys(), get_prev_content=True
)
events_by_room: Dict[str, List[EventBase]] = {}
@@ -575,9 +578,6 @@ async def _get_to_device_messages(
device_id,
), messages in recipient_device_to_messages.items():
for message_json in messages:
- # Remove 'message_id' from the to-device message, as it's an internal ID
- message_json.pop("message_id", None)
-
message_payload.append(
{
"to_user_id": user_id,
@@ -612,8 +612,8 @@ async def _get_device_list_summary(
)
# Fetch the users who have modified their device list since then.
- users_with_changed_device_lists = (
- await self.store.get_users_whose_devices_changed(from_key, to_key=new_key)
+ users_with_changed_device_lists = await self.store.get_all_devices_changed(
+ from_key, to_key=new_key
)
# Filter out any users the application service is not interested in
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index f5f0e0e7a773..8b9ef25d296f 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -38,6 +38,7 @@
import attr
import bcrypt
import unpaddedbase64
+from prometheus_client import Counter
from twisted.internet.defer import CancelledError
from twisted.web.server import Request
@@ -48,6 +49,7 @@
Codes,
InteractiveAuthIncompleteError,
LoginError,
+ NotFoundError,
StoreError,
SynapseError,
UserDeactivatedError,
@@ -63,10 +65,14 @@
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.databases.main.registration import (
+ LoginTokenExpired,
+ LoginTokenLookupResult,
+ LoginTokenReused,
+)
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
-from synapse.util.macaroons import LoginTokenAttributes
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email
@@ -80,6 +86,12 @@
INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"
+invalid_login_token_counter = Counter(
+ "synapse_user_login_invalid_login_tokens",
+ "Counts the number of rejected m.login.token on /login",
+ ["reason"],
+)
+
def convert_client_dict_legacy_fields_to_identifier(
submission: JsonDict,
@@ -883,6 +895,25 @@ def _verify_refresh_token(self, token: str) -> bool:
return True
+ async def create_login_token_for_user_id(
+ self,
+ user_id: str,
+ duration_ms: int = (2 * 60 * 1000),
+ auth_provider_id: Optional[str] = None,
+ auth_provider_session_id: Optional[str] = None,
+ ) -> str:
+ login_token = self.generate_login_token()
+ now = self._clock.time_msec()
+ expiry_ts = now + duration_ms
+ await self.store.add_login_token_to_user(
+ user_id=user_id,
+ token=login_token,
+ expiry_ts=expiry_ts,
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+ return login_token
+
async def create_refresh_token_for_user_id(
self,
user_id: str,
@@ -1401,6 +1432,18 @@ async def _check_local_password(self, user_id: str, password: str) -> Optional[s
return None
return user_id
+ def generate_login_token(self) -> str:
+ """Generates an opaque string, for use as an short-term login token"""
+
+ # we use the following format for access tokens:
+ # syl__
+
+ random_string = stringutils.random_string(20)
+ base = f"syl_{random_string}"
+
+ crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
+ return f"{base}_{crc}"
+
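# A standalone sketch of the syl_ token scheme; base62 is reimplemented here
# for illustration (the alphabet ordering may differ from Synapse's
# stringutils.base62_encode).
import random
import string
from binascii import crc32

_B62 = string.digits + string.ascii_uppercase + string.ascii_lowercase

def base62(n: int, minwidth: int = 6) -> str:
    out = ""
    while n:
        n, rem = divmod(n, 62)
        out = _B62[rem] + out
    return out.rjust(minwidth, "0")

rand = "".join(random.choices(string.ascii_letters + string.digits, k=20))
base = f"syl_{rand}"
token = f"{base}_{base62(crc32(base.encode('ascii')))}"

# The checksum lets a consumer cheaply reject mangled tokens before any
# database lookup:
prefix, _, crc = token.rpartition("_")
assert base62(crc32(prefix.encode("ascii"))) == crc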
def generate_access_token(self, for_user: UserID) -> str:
"""Generates an opaque string, for use as an access token"""
@@ -1427,16 +1470,17 @@ def generate_refresh_token(self, for_user: UserID) -> str:
crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
return f"{base}_{crc}"
- async def validate_short_term_login_token(
- self, login_token: str
- ) -> LoginTokenAttributes:
+ async def consume_login_token(self, login_token: str) -> LoginTokenLookupResult:
try:
- res = self.macaroon_gen.verify_short_term_login_token(login_token)
- except Exception:
- raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
+ return await self.store.consume_login_token(login_token)
+ except LoginTokenExpired:
+ invalid_login_token_counter.labels("expired").inc()
+ except LoginTokenReused:
+ invalid_login_token_counter.labels("reused").inc()
+ except NotFoundError:
+ invalid_login_token_counter.labels("not found").inc()
- await self.auth_blocking.check_auth_blocking(res.user_id)
- return res
+ raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN)
async def delete_access_token(self, access_token: str) -> None:
"""Invalidate a single access token
@@ -1711,7 +1755,7 @@ async def complete_sso_login(
)
# Create a login token
- login_token = self.macaroon_gen.generate_short_term_login_token(
+ login_token = await self.create_login_token_for_user_id(
registered_user_id,
auth_provider_id=auth_provider_id,
auth_provider_session_id=auth_provider_session_id,
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 816e1a6d79c8..d74d135c0c50 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -16,6 +16,7 @@
from typing import TYPE_CHECKING, Optional
from synapse.api.errors import SynapseError
+from synapse.handlers.device import DeviceHandler
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import Codes, Requester, UserID, create_requester
@@ -76,6 +77,9 @@ async def deactivate_account(
True if identity server supports removing threepids, otherwise False.
"""
+ # This can only be called on the main process.
+ assert isinstance(self._device_handler, DeviceHandler)
+
# Check if this user can be deactivated
if not await self._third_party_rules.check_can_deactivate_user(
user_id, by_admin
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index f9cc5bddbcbd..d4750a32e644 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -65,6 +65,8 @@
class DeviceWorkerHandler:
+ device_list_updater: "DeviceListWorkerUpdater"
+
def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
self.hs = hs
@@ -76,6 +78,8 @@ def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self._msc3852_enabled = hs.config.experimental.msc3852_enabled
+ self.device_list_updater = DeviceListWorkerUpdater(hs)
+
@trace
async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
"""
@@ -99,6 +103,19 @@ async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
log_kv(device_map)
return devices
+ async def get_dehydrated_device(
+ self, user_id: str
+ ) -> Optional[Tuple[str, JsonDict]]:
+ """Retrieve the information for a dehydrated device.
+
+ Args:
+ user_id: the user whose dehydrated device we are looking for
+ Returns:
+ a tuple whose first item is the device ID, and the second item is
+ the dehydrated device information
+ """
+ return await self.store.get_dehydrated_device(user_id)
+
@trace
async def get_device(self, user_id: str, device_id: str) -> JsonDict:
"""Retrieve the given device
@@ -127,7 +144,7 @@ async def get_device(self, user_id: str, device_id: str) -> JsonDict:
@cancellable
async def get_device_changes_in_shared_rooms(
self, user_id: str, room_ids: Collection[str], from_token: StreamToken
- ) -> Collection[str]:
+ ) -> Set[str]:
"""Get the set of users whose devices have changed who share a room with
the given user.
"""
@@ -320,6 +337,8 @@ async def handle_room_un_partial_stated(self, room_id: str) -> None:
class DeviceHandler(DeviceWorkerHandler):
+ device_list_updater: "DeviceListUpdater"
+
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
@@ -606,19 +625,6 @@ async def store_dehydrated_device(
await self.delete_devices(user_id, [old_device_id])
return device_id
- async def get_dehydrated_device(
- self, user_id: str
- ) -> Optional[Tuple[str, JsonDict]]:
- """Retrieve the information for a dehydrated device.
-
- Args:
- user_id: the user whose dehydrated device we are looking for
- Returns:
- a tuple whose first item is the device ID, and the second item is
- the dehydrated device information
- """
- return await self.store.get_dehydrated_device(user_id)
-
async def rehydrate_device(
self, user_id: str, access_token: str, device_id: str
) -> dict:
@@ -682,13 +688,33 @@ async def _handle_new_device_update_async(self) -> None:
hosts_already_sent_to: Set[str] = set()
try:
+ stream_id, room_id = await self.store.get_device_change_last_converted_pos()
+
while True:
self._handle_new_device_update_new_data = False
- rows = await self.store.get_uncoverted_outbound_room_pokes()
+ max_stream_id = self.store.get_device_stream_token()
+ rows = await self.store.get_uncoverted_outbound_room_pokes(
+ stream_id, room_id
+ )
if not rows:
# If the DB returned nothing then there is nothing left to
# do, *unless* a new device list update happened during the
# DB query.
+
+ # Advance `(stream_id, room_id)`.
+ # `max_stream_id` comes from *before* the query for unconverted
+ # rows, which means that any unconverted rows must have a larger
+ # stream ID.
+ if max_stream_id > stream_id:
+ stream_id, room_id = max_stream_id, ""
+ await self.store.set_device_change_last_converted_pos(
+ stream_id, room_id
+ )
+ else:
+ assert max_stream_id == stream_id
+ # Avoid moving `room_id` backwards.
+ pass
+
if self._handle_new_device_update_new_data:
continue
else:
@@ -718,7 +744,6 @@ async def _handle_new_device_update_async(self) -> None:
user_id=user_id,
device_id=device_id,
room_id=room_id,
- stream_id=stream_id,
hosts=hosts,
context=opentracing_context,
)
@@ -752,6 +777,12 @@ async def _handle_new_device_update_async(self) -> None:
hosts_already_sent_to.update(hosts)
current_stream_id = stream_id
+ # Advance `(stream_id, room_id)`.
+ _, _, room_id, stream_id, _ = rows[-1]
+ await self.store.set_device_change_last_converted_pos(
+ stream_id, room_id
+ )
+
finally:
self._handle_new_device_update_is_processing = False
@@ -834,7 +865,6 @@ async def handle_room_un_partial_stated(self, room_id: str) -> None:
user_id=user_id,
device_id=device_id,
room_id=room_id,
- stream_id=None,
hosts=potentially_changed_hosts,
context=None,
)
@@ -858,7 +888,36 @@ def _update_device_from_client_ips(
)
-class DeviceListUpdater:
+class DeviceListWorkerUpdater:
+ "Handles incoming device list updates from federation and contacts the main process over replication"
+
+ def __init__(self, hs: "HomeServer"):
+ from synapse.replication.http.devices import (
+ ReplicationUserDevicesResyncRestServlet,
+ )
+
+ self._user_device_resync_client = (
+ ReplicationUserDevicesResyncRestServlet.make_client(hs)
+ )
+
+ async def user_device_resync(
+ self, user_id: str, mark_failed_as_stale: bool = True
+ ) -> Optional[JsonDict]:
+ """Fetches all devices for a user and updates the device cache with them.
+
+ Args:
+ user_id: The user's id whose device_list will be updated.
+ mark_failed_as_stale: Whether to mark the user's device list as stale
+ if the attempt to resync failed.
+ Returns:
+ A dict with the device info, as under the "devices" key in the result
+ of this request:
+ https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
+ """
+ return await self._user_device_resync_client(user_id=user_id)
+
+
+class DeviceListUpdater(DeviceListWorkerUpdater):
"Handles incoming device list updates from federation and updates the DB"
def __init__(self, hs: "HomeServer", device_handler: DeviceHandler):
@@ -937,7 +996,10 @@ async def incoming_device_list_update(
# Check if we are partially joining any rooms. If so we need to store
# all device list updates so that we can handle them correctly once we
# know who is in the room.
- partial_rooms = await self.store.get_partial_state_rooms_and_servers()
+ # TODO(faster_joins): this fetches and processes a bunch of data that we don't
+ # use. Could be replaced by a tighter query e.g.
+ # SELECT EXISTS(SELECT 1 FROM partial_state_rooms)
+ partial_rooms = await self.store.get_partial_state_room_resync_info()
if partial_rooms:
await self.store.add_remote_device_list_to_pending(
user_id,
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index 444c08bc2eef..75e89850f5bc 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -15,7 +15,7 @@
import logging
from typing import TYPE_CHECKING, Any, Dict
-from synapse.api.constants import EduTypes, ToDeviceEventTypes
+from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes
from synapse.api.errors import SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import run_in_background
@@ -216,14 +216,24 @@ async def send_device_message(
"""
sender_user_id = requester.user.to_string()
- message_id = random_string(16)
- set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)
-
- log_kv({"number_of_to_device_messages": len(messages)})
- set_tag("sender", sender_user_id)
+ set_tag(SynapseTags.TO_DEVICE_TYPE, message_type)
+ set_tag(SynapseTags.TO_DEVICE_SENDER, sender_user_id)
local_messages = {}
remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
for user_id, by_device in messages.items():
+ # add an opentracing log entry for each message
+ for device_id, message_content in by_device.items():
+ log_kv(
+ {
+ "event": "send_to_device_message",
+ "user_id": user_id,
+ "device_id": device_id,
+ EventContentFields.TO_DEVICE_MSGID: message_content.get(
+ EventContentFields.TO_DEVICE_MSGID
+ ),
+ }
+ )
+
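+ # For example, `messages` might look like this (made-up IDs):
+ #     {"@alice:example.org": {"JLAFKJWSCS": {"body": "..."}}}
+ # so one log entry is emitted per (user_id, device_id) pair.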
# Ratelimit local cross-user key requests by the sending device.
if (
message_type == ToDeviceEventTypes.RoomKeyRequest
@@ -233,6 +243,7 @@ async def send_device_message(
requester, (sender_user_id, requester.device_id)
)
if not allowed:
+ log_kv({"message": f"dropping key requests to {user_id}"})
logger.info(
"Dropping room_key_request from %s to %s due to rate limit",
sender_user_id,
@@ -247,18 +258,11 @@ async def send_device_message(
"content": message_content,
"type": message_type,
"sender": sender_user_id,
- "message_id": message_id,
}
for device_id, message_content in by_device.items()
}
if messages_by_device:
local_messages[user_id] = messages_by_device
- log_kv(
- {
- "user_id": user_id,
- "device_id": list(messages_by_device),
- }
- )
else:
destination = get_domain_from_id(user_id)
remote_messages.setdefault(destination, {})[user_id] = by_device
@@ -267,7 +271,11 @@ async def send_device_message(
remote_edu_contents = {}
for destination, messages in remote_messages.items():
- log_kv({"destination": destination})
+ # The EDU contains a "message_id" property which is used for
+ # idempotence. Make up a random one.
+ message_id = random_string(16)
+ log_kv({"destination": destination, "message_id": message_id})
+
remote_edu_contents[destination] = {
"messages": messages,
"sender": sender_user_id,
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 7127d5aefcb7..2ea52257cb9e 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -16,6 +16,8 @@
import string
from typing import TYPE_CHECKING, Iterable, List, Optional
+from typing_extensions import Literal
+
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes
from synapse.api.errors import (
AuthError,
@@ -83,7 +85,7 @@ async def _create_association(
# TODO(erikj): Add transactions.
# TODO(erikj): Check if there is a current association.
if not servers:
- servers = await self._storage_controllers.state.get_current_hosts_in_room(
+ servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
@@ -288,7 +290,7 @@ async def get_association(self, room_alias: RoomAlias) -> JsonDict:
Codes.NOT_FOUND,
)
- extra_servers = await self._storage_controllers.state.get_current_hosts_in_room(
+ extra_servers = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
servers_set = set(extra_servers) | set(servers)
@@ -429,7 +431,10 @@ async def _user_can_delete_alias(
return await self.auth.check_can_change_room_list(room_id, requester)
async def edit_published_room_list(
- self, requester: Requester, room_id: str, visibility: str
+ self,
+ requester: Requester,
+ room_id: str,
+ visibility: Literal["public", "private"],
) -> None:
"""Edit the entry of the room in the published room list.
@@ -451,9 +456,6 @@ async def edit_published_room_list(
if requester.is_guest:
raise AuthError(403, "Guests cannot edit the published room list")
- if visibility not in ["public", "private"]:
- raise SynapseError(400, "Invalid visibility setting")
-
if visibility == "public" and not self.enable_room_list_search:
# The room list has been disabled.
raise AuthError(
@@ -505,7 +507,11 @@ async def edit_published_room_list(
await self.store.set_room_is_public(room_id, making_public)
async def edit_published_appservice_room_list(
- self, appservice_id: str, network_id: str, room_id: str, visibility: str
+ self,
+ appservice_id: str,
+ network_id: str,
+ room_id: str,
+ visibility: Literal["public", "private"],
) -> None:
"""Add or remove a room from the appservice/network specific public
room list.
@@ -516,9 +522,6 @@ async def edit_published_appservice_room_list(
room_id
visibility: either "public" or "private"
"""
- if visibility not in ["public", "private"]:
- raise SynapseError(400, "Invalid visibility setting")
-
await self.store.set_room_is_public_appservice(
room_id, appservice_id, network_id, visibility == "public"
)
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 09a2492afc9e..5fe102e2f2f3 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -27,9 +27,9 @@
from synapse.api.constants import EduTypes
from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError
+from synapse.handlers.device import DeviceHandler
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
-from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.types import (
JsonDict,
UserID,
@@ -49,33 +49,30 @@
class E2eKeysHandler:
def __init__(self, hs: "HomeServer"):
+ self.config = hs.config
self.store = hs.get_datastores().main
self.federation = hs.get_federation_client()
self.device_handler = hs.get_device_handler()
self.is_mine = hs.is_mine
self.clock = hs.get_clock()
- self._edu_updater = SigningKeyEduUpdater(hs, self)
-
federation_registry = hs.get_federation_registry()
- self._is_master = hs.config.worker.worker_app is None
- if not self._is_master:
- self._user_device_resync_client = (
- ReplicationUserDevicesResyncRestServlet.make_client(hs)
- )
- else:
+ is_master = hs.config.worker.worker_app is None
+ if is_master:
+ edu_updater = SigningKeyEduUpdater(hs)
+
# Only register this edu handler on master as it requires writing
# device updates to the db
federation_registry.register_edu_handler(
EduTypes.SIGNING_KEY_UPDATE,
- self._edu_updater.incoming_signing_key_update,
+ edu_updater.incoming_signing_key_update,
)
# also handle the unstable version
# FIXME: remove this when enough servers have upgraded
federation_registry.register_edu_handler(
EduTypes.UNSTABLE_SIGNING_KEY_UPDATE,
- self._edu_updater.incoming_signing_key_update,
+ edu_updater.incoming_signing_key_update,
)
# doesn't really work as part of the generic query API, because the
@@ -318,14 +315,13 @@ async def _query_devices_for_destination(
# probably be tracking their device lists. However, we haven't
# done an initial sync on the device list so we do it now.
try:
- if self._is_master:
- resync_results = await self.device_handler.device_list_updater.user_device_resync(
+ resync_results = (
+ await self.device_handler.device_list_updater.user_device_resync(
user_id
)
- else:
- resync_results = await self._user_device_resync_client(
- user_id=user_id
- )
+ )
+ if resync_results is None:
+ raise ValueError("Device resync failed")
# Add the device keys to the results.
user_devices = resync_results["devices"]
@@ -431,13 +427,17 @@ async def get_cross_signing_keys_from_cache(
@trace
@cancellable
async def query_local_devices(
- self, query: Mapping[str, Optional[List[str]]]
+ self,
+ query: Mapping[str, Optional[List[str]]],
+ include_displaynames: bool = True,
) -> Dict[str, Dict[str, dict]]:
"""Get E2E device keys for local users
Args:
query: map from user_id to a list
of devices to query (None for all devices)
+ include_displaynames: Whether to include device displaynames in the returned
+ device details.
Returns:
A map from user_id -> device_id -> device details
@@ -469,7 +469,9 @@ async def query_local_devices(
# make sure that each queried user appears in the result dict
result_dict[user_id] = {}
- results = await self.store.get_e2e_device_keys_for_cs_api(local_query)
+ results = await self.store.get_e2e_device_keys_for_cs_api(
+ local_query, include_displaynames
+ )
# Build the result structure
for user_id, device_keys in results.items():
@@ -482,11 +484,33 @@ async def query_local_devices(
async def on_federation_query_client_keys(
self, query_body: Dict[str, Dict[str, Optional[List[str]]]]
) -> JsonDict:
- """Handle a device key query from a federated server"""
+ """Handle a device key query from a federated server:
+
+ Handles the path: GET /_matrix/federation/v1/users/keys/query
+
+ Args:
+ query_body: The body of the query request. Should contain a key
+ "device_keys" that maps to a dictionary of user IDs -> list of
+ device IDs. If the list of device IDs is empty, all devices of
+ that user will be queried.
+
+ Returns:
+ A json dictionary containing the following:
+ - device_keys: A dictionary containing the requested device information.
+ - master_keys: An optional dictionary of user ID -> master cross-signing
+ key info.
+ - self_signing_keys: An optional dictionary of user ID -> self-signing
+ key info.
+ """
device_keys_query: Dict[str, Optional[List[str]]] = query_body.get(
"device_keys", {}
)
- res = await self.query_local_devices(device_keys_query)
+ res = await self.query_local_devices(
+ device_keys_query,
+ include_displaynames=(
+ self.config.federation.allow_device_name_lookup_over_federation
+ ),
+ )
ret = {"device_keys": res}
# add in the cross-signing keys
@@ -576,6 +600,8 @@ async def claim_client_keys(destination: str) -> None:
async def upload_keys_for_user(
self, user_id: str, device_id: str, keys: JsonDict
) -> JsonDict:
+ # This can only be called from the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
time_now = self.clock.time_msec()
@@ -703,6 +729,8 @@ async def upload_signing_keys_for_user(
user_id: the user uploading the keys
keys: the signing keys
"""
+ # This can only be called from the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
# if a master key is uploaded, then check it. Otherwise, load the
# stored master key, to check signatures on other keys
@@ -794,6 +822,9 @@ async def upload_signatures_for_device_keys(
Raises:
SynapseError: if the signatures dict is not valid.
"""
+ # This can only be called from the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
+
failures = {}
# signatures to be stored. Each item will be a SignatureListItem
@@ -841,7 +872,7 @@ async def _process_self_signatures(
- signatures of the user's master key by the user's devices.
Args:
- user_id (string): the user uploading the keys
+ user_id: the user uploading the keys
signatures (dict[string, dict]): map of devices to signed keys
Returns:
@@ -1171,6 +1202,9 @@ async def _retrieve_cross_signing_keys_for_remote_user(
A tuple of the retrieved key content, the key's ID and the matching VerifyKey.
If the key cannot be retrieved, all values in the tuple will instead be None.
"""
+ # This can only be called from the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
+
try:
remote_result = await self.federation.query_user_devices(
user.domain, user.to_string()
@@ -1367,11 +1401,14 @@ class SignatureListItem:
class SigningKeyEduUpdater:
"""Handles incoming signing key updates from federation and updates the DB"""
- def __init__(self, hs: "HomeServer", e2e_keys_handler: E2eKeysHandler):
+ def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.federation = hs.get_federation_client()
self.clock = hs.get_clock()
- self.e2e_keys_handler = e2e_keys_handler
+
+ device_handler = hs.get_device_handler()
+ assert isinstance(device_handler, DeviceHandler)
+ self._device_handler = device_handler
self._remote_edu_linearizer = Linearizer(name="remote_signing_key")
@@ -1416,9 +1453,6 @@ async def _handle_signing_key_updates(self, user_id: str) -> None:
user_id: the user whose updates we are processing
"""
- device_handler = self.e2e_keys_handler.device_handler
- device_list_updater = device_handler.device_list_updater
-
async with self._remote_edu_linearizer.queue(user_id):
pending_updates = self._pending_updates.pop(user_id, [])
if not pending_updates:
@@ -1430,13 +1464,11 @@ async def _handle_signing_key_updates(self, user_id: str) -> None:
logger.info("pending updates: %r", pending_updates)
for master_key, self_signing_key in pending_updates:
- new_device_ids = (
- await device_list_updater.process_cross_signing_key_update(
- user_id,
- master_key,
- self_signing_key,
- )
+ new_device_ids = await self._device_handler.device_list_updater.process_cross_signing_key_update(
+ user_id,
+ master_key,
+ self_signing_key,
)
device_ids = device_ids + new_device_ids
- await device_handler.notify_device_update(user_id, device_ids)
+ await self._device_handler.notify_device_update(user_id, device_ids)
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 28dc08c22a36..83f53ceb8891 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -377,8 +377,9 @@ async def delete_version(self, user_id: str, version: Optional[str] = None) -> N
"""Deletes a given version of the user's e2e_room_keys backup
Args:
- user_id(str): the user whose current backup version we're deleting
- version(str): the version id of the backup being deleted
+ user_id: the user whose current backup version we're deleting
+ version: Optional. The version ID of the backup version we're deleting.
+ If missing, we delete the current backup version info.
Raises:
NotFoundError: if this backup version doesn't exist
"""
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index 8249ca1ed26c..f91dbbecb79c 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Collection, List, Optional, Union
+from typing import TYPE_CHECKING, Collection, List, Mapping, Optional, Union
from synapse import event_auth
from synapse.api.constants import (
@@ -29,7 +29,6 @@
)
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
-from synapse.events.snapshot import EventContext
from synapse.types import StateMap, get_domain_from_id
if TYPE_CHECKING:
@@ -46,17 +45,27 @@ class EventAuthHandler:
def __init__(self, hs: "HomeServer"):
self._clock = hs.get_clock()
self._store = hs.get_datastores().main
+ self._state_storage_controller = hs.get_storage_controllers().state
self._server_name = hs.hostname
async def check_auth_rules_from_context(
self,
event: EventBase,
- context: EventContext,
+ batched_auth_events: Optional[Mapping[str, EventBase]] = None,
) -> None:
- """Check an event passes the auth rules at its own auth events"""
- await check_state_independent_auth_rules(self._store, event)
+ """Check an event passes the auth rules at its own auth events
+ Args:
+ event: event to be authed
+ batched_auth_events: if the event being authed is part of a batch, any events
+ from the same batch that may be necessary to auth the current event
+ """
+ await check_state_independent_auth_rules(
+ self._store, event, batched_auth_events
+ )
auth_event_ids = event.auth_event_ids()
auth_events_by_id = await self._store.get_events(auth_event_ids)
+ if batched_auth_events:
+ auth_events_by_id.update(batched_auth_events)
check_state_dependent_auth_rules(event, auth_events_by_id.values())
def compute_auth_events(
@@ -171,17 +180,22 @@ async def assert_host_in_room(
this function may return an incorrect result as we are not able to fully
track server membership in a room without full state.
"""
- if not allow_partial_state_rooms and await self._store.is_partial_state_room(
- room_id
- ):
- raise AuthError(
- 403,
- "Unable to authorise you right now; room is partial-stated here.",
- errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
- )
-
- if not await self.is_host_in_room(room_id, host):
- raise AuthError(403, "Host not in room.")
+ if await self._store.is_partial_state_room(room_id):
+ if allow_partial_state_rooms:
+ current_hosts = await self._state_storage_controller.get_current_hosts_in_room_or_partial_state_approximation(
+ room_id
+ )
+ if host not in current_hosts:
+ raise AuthError(403, "Host not in room (partial-state approx).")
+ else:
+ raise AuthError(
+ 403,
+ "Unable to authorise you right now; room is partial-stated here.",
+ errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
+ )
+ else:
+ if not await self.is_host_in_room(room_id, host):
+ raise AuthError(403, "Host not in room.")
async def check_restricted_join_rules(
self,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 986ffed3d592..b2784d73339d 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -45,6 +45,7 @@
Codes,
FederationDeniedError,
FederationError,
+ FederationPullAttemptBackoffError,
HttpResponseException,
LimitExceededError,
NotFoundError,
@@ -69,8 +70,8 @@
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.state import StateFilter
from synapse.types import JsonDict, get_domain_from_id
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.retryutils import NotRetryingDestination
from synapse.visibility import filter_events_for_server
@@ -151,6 +152,7 @@ def __init__(self, hs: "HomeServer"):
self._federation_event_handler = hs.get_federation_event_handler()
self._device_handler = hs.get_device_handler()
self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
+ self._notifier = hs.get_notifier()
self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
hs
@@ -378,6 +380,7 @@ async def _maybe_backfill_inner(
filtered_extremities = await filter_events_for_server(
self._storage_controllers,
self.server_name,
+ self.server_name,
events_to_check,
redact=False,
check_history_visibility_only=True,
@@ -441,6 +444,15 @@ async def try_backfill(domains: Collection[str]) -> bool:
# appropriate stuff.
# TODO: We can probably do something more intelligent here.
return True
+ except NotRetryingDestination as e:
+ logger.info("_maybe_backfill_inner: %s", e)
+ continue
+ except FederationDeniedError:
+ logger.info(
+ "_maybe_backfill_inner: Not attempting to backfill from %s because the homeserver is not on our federation whitelist",
+ dom,
+ )
+ continue
except (SynapseError, InvalidResponseError) as e:
logger.info("Failed to backfill from %s because %s", dom, e)
continue
@@ -476,15 +488,9 @@ async def try_backfill(domains: Collection[str]) -> bool:
logger.info("Failed to backfill from %s because %s", dom, e)
continue
- except NotRetryingDestination as e:
- logger.info(str(e))
- continue
except RequestSendFailed as e:
logger.info("Failed to get backfill from %s because %s", dom, e)
continue
- except FederationDeniedError as e:
- logger.info(e)
- continue
except Exception as e:
logger.exception("Failed to backfill from %s because %s", dom, e)
continue
@@ -631,6 +637,7 @@ async def do_invite_join(
room_id=room_id,
servers=ret.servers_in_room,
device_lists_stream_id=self.store.get_device_stream_token(),
+ joined_via=origin,
)
try:
@@ -781,15 +788,27 @@ async def do_knock(
# Send the signed event back to the room, and potentially receive some
# further information about the room in the form of partial state events
- stripped_room_state = await self.federation_client.send_knock(
- target_hosts, event
- )
+ knock_response = await self.federation_client.send_knock(target_hosts, event)
# Store any stripped room state events in the "unsigned" key of the event.
# This is a bit of a hack and is cribbing off of invites. Basically we
# store the room state here and retrieve it again when this event appears
# in the invitee's sync stream. It is stripped out for all other local users.
- event.unsigned["knock_room_state"] = stripped_room_state["knock_state_events"]
+ stripped_room_state = (
+ knock_response.get("knock_room_state")
+ # Since v1.37, Synapse incorrectly used "knock_state_events" for this field.
+ # Thus, we also check for 'knock_state_events' to support old instances.
+ # See https://github.com/matrix-org/synapse/issues/14088.
+ or knock_response.get("knock_state_events")
+ )
+
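+ # E.g. a spec-compliant server replies {"knock_room_state": [...]},
+ # while an older Synapse replies {"knock_state_events": [...]}
+ # (illustrative shapes).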
+ if stripped_room_state is None:
+ raise KeyError(
+ "Missing 'knock_room_state' (or legacy 'knock_state_events') field in "
+ "send_knock response"
+ )
+
+ event.unsigned["knock_room_state"] = stripped_room_state
context = EventContext.for_outlier(self._storage_controllers)
stream_id = await self._federation_event_handler.persist_events_and_notify(
@@ -928,7 +947,7 @@ async def on_make_join_request(
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
return event
async def on_invite_request(
@@ -1003,7 +1022,9 @@ async def on_invite_request(
context = EventContext.for_outlier(self._storage_controllers)
- await self._bulk_push_rule_evaluator.action_for_event_by_user(event, context)
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ [(event, context)]
+ )
try:
await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
@@ -1109,7 +1130,7 @@ async def on_make_leave_request(
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Failed to create new leave %r because %s", event, e)
raise e
@@ -1168,7 +1189,7 @@ async def on_make_knock_request(
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_knock_request`
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Failed to create new knock %r because %s", event, e)
raise e
@@ -1212,7 +1233,9 @@ async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]:
async def on_backfill_request(
self, origin: str, room_id: str, pdu_list: List[str], limit: int
) -> List[EventBase]:
- await self._event_auth_handler.assert_host_in_room(room_id, origin)
+ # We allow partially joined rooms since in this case we are filtering out
+ # non-local events in `filter_events_for_server`.
+ await self._event_auth_handler.assert_host_in_room(room_id, origin, True)
# Synapse asks for 100 events per backfill request. Do not allow more.
limit = min(limit, 100)
@@ -1233,7 +1256,7 @@ async def on_backfill_request(
)
events = await filter_events_for_server(
- self._storage_controllers, origin, events
+ self._storage_controllers, origin, self.server_name, events
)
return events
@@ -1264,7 +1287,7 @@ async def get_persisted_pdu(
await self._event_auth_handler.assert_host_in_room(event.room_id, origin)
events = await filter_events_for_server(
- self._storage_controllers, origin, [event]
+ self._storage_controllers, origin, self.server_name, [event]
)
event = events[0]
return event
@@ -1277,7 +1300,9 @@ async def on_get_missing_events(
latest_events: List[str],
limit: int,
) -> List[EventBase]:
- await self._event_auth_handler.assert_host_in_room(room_id, origin)
+ # We allow partially joined rooms since in this case we are filtering out
+ # non-local events in `filter_events_for_server`.
+ await self._event_auth_handler.assert_host_in_room(room_id, origin, True)
# Only allow up to 20 events to be retrieved per request.
limit = min(limit, 20)
@@ -1290,7 +1315,7 @@ async def on_get_missing_events(
)
missing_events = await filter_events_for_server(
- self._storage_controllers, origin, missing_events
+ self._storage_controllers, origin, self.server_name, missing_events
)
return missing_events
@@ -1334,9 +1359,7 @@ async def exchange_third_party_invite(
try:
validate_event_for_room_version(event)
- await self._event_auth_handler.check_auth_rules_from_context(
- event, context
- )
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Denying new third party invite %r because %s", event, e)
raise e
@@ -1386,7 +1409,7 @@ async def on_exchange_third_party_invite_request(
try:
validate_event_for_room_version(event)
- await self._event_auth_handler.check_auth_rules_from_context(event, context)
+ await self._event_auth_handler.check_auth_rules_from_context(event)
except AuthError as e:
logger.warning("Denying third party invite %r because %s", event, e)
raise e
@@ -1579,8 +1602,8 @@ async def get_room_complexity(
Fetch the complexity of a remote room over federation.
Args:
- remote_room_hosts (list[str]): The remote servers to ask.
- room_id (str): The room ID to ask about.
+ remote_room_hosts: The remote servers to ask.
+ room_id: The room ID to ask about.
Returns:
Dict contains the complexity
@@ -1602,13 +1625,13 @@ async def _resume_sync_partial_state_room(self) -> None:
"""Resumes resyncing of all partial-state rooms after a restart."""
assert not self.config.worker.worker_app
- partial_state_rooms = await self.store.get_partial_state_rooms_and_servers()
- for room_id, servers_in_room in partial_state_rooms.items():
+ partial_state_rooms = await self.store.get_partial_state_room_resync_info()
+ for room_id, resync_info in partial_state_rooms.items():
run_as_background_process(
desc="sync_partial_state_room",
func=self._sync_partial_state_room,
- initial_destination=None,
- other_destinations=servers_in_room,
+ initial_destination=resync_info.joined_via,
+ other_destinations=resync_info.servers_in_room,
room_id=room_id,
)
@@ -1637,28 +1660,12 @@ async def _sync_partial_state_room(
# really leave, that might mean we have difficulty getting the room state over
# federation.
# https://github.com/matrix-org/synapse/issues/12802
- #
- # TODO(faster_joins): we need some way of prioritising which homeservers in
- # `other_destinations` to try first, otherwise we'll spend ages trying dead
- # homeservers for large rooms.
- # https://github.com/matrix-org/synapse/issues/12999
-
- if initial_destination is None and len(other_destinations) == 0:
- raise ValueError(
- f"Cannot resync state of {room_id}: no destinations provided"
- )
# Make an infinite iterator of destinations to try. Once we find a working
# destination, we'll stick with it until it flakes.
- destinations: Collection[str]
- if initial_destination is not None:
- # Move `initial_destination` to the front of the list.
- destinations = list(other_destinations)
- if initial_destination in destinations:
- destinations.remove(initial_destination)
- destinations = [initial_destination] + destinations
- else:
- destinations = other_destinations
+ destinations = _prioritise_destinations_for_partial_state_resync(
+ initial_destination, other_destinations, room_id
+ )
destination_iter = itertools.cycle(destinations)
# `destination` is the current remote homeserver we're pulling from.
@@ -1686,6 +1693,9 @@ async def _sync_partial_state_room(
self._storage_controllers.state.notify_room_un_partial_stated(
room_id
)
+ # Poke the notifier so that other workers see the write to
+ # the un-partial-stated rooms stream.
+ self._notifier.notify_replication()
# TODO(faster_joins) update room stats and user directory?
# https://github.com/matrix-org/synapse/issues/12814
@@ -1708,7 +1718,22 @@ async def _sync_partial_state_room(
destination, event
)
break
+ except FederationPullAttemptBackoffError as exc:
+ # Log a warning about why we failed to process the event (the error message
+ # for `FederationPullAttemptBackoffError` is pretty good)
+ logger.warning("_sync_partial_state_room: %s", exc)
+ # We do not record a failed pull attempt when we backoff fetching a missing
+ # `prev_event` because not being able to fetch the `prev_events` just means
+ # we won't be able to de-outlier the pulled event. But we can still use an
+ # `outlier` in the state/auth chain for another event. So we shouldn't stop
+ # a downstream event from trying to pull it.
+ #
+ # This avoids a cascade of backoff for all events in the DAG downstream from
+ # one event backoff upstream.
except FederationError as e:
+ # TODO: We should `record_event_failed_pull_attempt` here,
+ # see https://github.com/matrix-org/synapse/issues/13700
+
if attempt == len(destinations) - 1:
# We have tried every remote server for this event. Give up.
# TODO(faster_joins) giving up isn't the right thing to do
@@ -1741,3 +1766,29 @@ async def _sync_partial_state_room(
room_id,
destination,
)
+
+
+def _prioritise_destinations_for_partial_state_resync(
+ initial_destination: Optional[str],
+ other_destinations: Collection[str],
+ room_id: str,
+) -> Collection[str]:
+ """Work out the order in which we should ask servers to resync events.
+
+ If an `initial_destination` is given, it takes top priority. Otherwise
+ all servers are treated equally.
+
+ Raises:
+ ValueError: if no destination is provided at all.
+ """
+ if initial_destination is None and len(other_destinations) == 0:
+ raise ValueError(f"Cannot resync state of {room_id}: no destinations provided")
+
+ if initial_destination is None:
+ return other_destinations
+
+ # Move `initial_destination` to the front of the list.
+ destinations = list(other_destinations)
+ if initial_destination in destinations:
+ destinations.remove(initial_destination)
+ destinations = [initial_destination] + destinations
+ return destinations
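+
+# For example (made-up server names), calling the helper with
+# initial_destination="hs1.example" and
+# other_destinations=["hs2.example", "hs1.example"] returns
+# ["hs1.example", "hs2.example"]: the joined-via server is tried first
+# and is not listed twice.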
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index da319943cc19..66aca2f8642b 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -43,7 +43,9 @@
from synapse.api.errors import (
AuthError,
Codes,
+ EventSizeError,
FederationError,
+ FederationPullAttemptBackoffError,
HttpResponseException,
RequestSendFailed,
SynapseError,
@@ -57,7 +59,7 @@
)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
-from synapse.federation.federation_client import InvalidResponseError
+from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
from synapse.logging.context import nested_logging_context
from synapse.logging.opentracing import (
SynapseTags,
@@ -74,7 +76,6 @@
from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.state import StateFilter
from synapse.types import (
PersistedEventPosition,
RoomStreamToken,
@@ -82,6 +83,7 @@
UserID,
get_domain_from_id,
)
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.iterutils import batch_iter
from synapse.util.retryutils import NotRetryingDestination
@@ -414,7 +416,9 @@ async def on_send_membership_event(
# First, precalculate the joined hosts so that the federation sender doesn't
# need to.
- await self._event_creation_handler.cache_joined_hosts_for_event(event, context)
+ await self._event_creation_handler.cache_joined_hosts_for_events(
+ [(event, context)]
+ )
await self._check_for_soft_fail(event, context=context, origin=origin)
await self._run_push_actions_and_persist_event(event, context)
@@ -565,6 +569,9 @@ async def update_state_for_partial_state_event(
event: partial-state event to be de-partial-stated
Raises:
+ FederationPullAttemptBackoffError if we are deliberately not attempting
+ to pull the given event over federation because we've already done so
+ recently and are backing off.
FederationError if we fail to request state from the remote server.
"""
logger.info("Updating state for %s", event.event_id)
@@ -792,9 +799,42 @@ async def _process_pulled_events(
],
)
+ # Check if we already have any of these events.
+ # Note: we currently make a lookup in the database directly here rather than
+ # checking the event cache, due to:
+ # https://github.com/matrix-org/synapse/issues/13476
+ existing_events_map = await self._store._get_events_from_db(
+ [event.event_id for event in events]
+ )
+
+ new_events = []
+ for event in events:
+ event_id = event.event_id
+
+ # If we've already seen this event ID...
+ if event_id in existing_events_map:
+ existing_event = existing_events_map[event_id]
+
+ # ...and the event itself was not previously stored as an outlier...
+ if not existing_event.event.internal_metadata.is_outlier():
+ # ...then there's no need to persist it. We have it already.
+ logger.info(
+ "_process_pulled_event: Ignoring received event %s which we "
+ "have already seen",
+ event.event_id,
+ )
+ continue
+
+ # While we have seen this event before, it was stored as an outlier.
+ # We'll now persist it as a non-outlier.
+ logger.info("De-outliering event %s", event_id)
+
+ # Continue on with the events that are new to us.
+ new_events.append(event)
+
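+ # E.g. given pulled events [X (brand new), Y (already persisted,
+ # non-outlier), Z (stored only as an outlier)], `new_events` keeps X
+ # and Z: Y is skipped, Z will be de-outliered (illustrative).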
# We want to sort these by depth so we process them and
# tell clients about them in order.
- sorted_events = sorted(events, key=lambda x: x.depth)
+ sorted_events = sorted(new_events, key=lambda x: x.depth)
for ev in sorted_events:
with nested_logging_context(ev.event_id):
await self._process_pulled_event(origin, ev, backfilled=backfilled)
@@ -846,18 +886,6 @@ async def _process_pulled_event(
event_id = event.event_id
- existing = await self._store.get_event(
- event_id, allow_none=True, allow_rejected=True
- )
- if existing:
- if not existing.internal_metadata.is_outlier():
- logger.info(
- "_process_pulled_event: Ignoring received event %s which we have already seen",
- event_id,
- )
- return
- logger.info("De-outliering event %s", event_id)
-
try:
self._sanity_check_event(event)
except SynapseError as err:
@@ -899,6 +927,18 @@ async def _process_pulled_event(
context,
backfilled=backfilled,
)
+ except FederationPullAttemptBackoffError as exc:
+ # Log a warning about why we failed to process the event (the error message
+ # for `FederationPullAttemptBackoffError` is pretty good)
+ logger.warning("_process_pulled_event: %s", exc)
+ # We do not record a failed pull attempt when we backoff fetching a missing
+ # `prev_event` because not being able to fetch the `prev_events` just means
+ # we won't be able to de-outlier the pulled event. But we can still use an
+ # `outlier` in the state/auth chain for another event. So we shouldn't stop
+ # a downstream event from trying to pull it.
+ #
+ # This avoids a cascade of backoff for all events in the DAG downstream from
+ # one event backoff upstream.
except FederationError as e:
await self._store.record_event_failed_pull_attempt(
event.room_id, event_id, str(e)
@@ -945,6 +985,9 @@ async def _compute_event_context_with_maybe_missing_prevs(
The event context.
Raises:
+ FederationPullAttemptBackoffError if we are deliberately not attempting
+ to pull the given event over federation because we've already done so
+ recently and are backing off.
FederationError if we fail to get the state from the remote server after any
missing `prev_event`s.
"""
@@ -955,6 +998,18 @@ async def _compute_event_context_with_maybe_missing_prevs(
seen = await self._store.have_events_in_timeline(prevs)
missing_prevs = prevs - seen
+ # If we've already recently attempted to pull this missing event, don't
+ # try it again so soon. Since we have to fetch all of the prev_events, we can
+ # bail early here if we find any to ignore.
+ prevs_to_ignore = await self._store.get_event_ids_to_not_pull_from_backoff(
+ room_id, missing_prevs
+ )
+ if len(prevs_to_ignore) > 0:
+ raise FederationPullAttemptBackoffError(
+ event_ids=prevs_to_ignore,
+ message=f"While computing context for event={event_id}, not attempting to pull missing prev_event={prevs_to_ignore[0]} because we already tried to pull recently (backing off).",
+ )
+
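+ # E.g. if prev_events = {A, B} and we recently failed to pull A, we
+ # bail out here instead of re-requesting A from federation
+ # (illustrative event IDs).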
if not missing_prevs:
return await self._state_handler.compute_event_context(event)
@@ -1011,10 +1066,9 @@ async def _compute_event_context_with_maybe_missing_prevs(
state_res_store=StateResolutionStore(self._store),
)
- except Exception:
+ except Exception as e:
logger.warning(
- "Error attempting to resolve state at missing prev_events",
- exc_info=True,
+ "Error attempting to resolve state at missing prev_events: %s", e
)
raise FederationError(
"ERROR",
@@ -1463,8 +1517,8 @@ async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> No
)
async def backfill_event_id(
- self, destination: str, room_id: str, event_id: str
- ) -> EventBase:
+ self, destinations: List[str], room_id: str, event_id: str
+ ) -> PulledPduInfo:
"""Backfill a single event and persist it as a non-outlier which means
we also pull in all of the state and auth events necessary for it.
@@ -1476,24 +1530,21 @@ async def backfill_event_id(
Raises:
FederationError if we are unable to find the event from the destination
"""
- logger.info(
- "backfill_event_id: event_id=%s from destination=%s", event_id, destination
- )
+ logger.info("backfill_event_id: event_id=%s", event_id)
room_version = await self._store.get_room_version(room_id)
- event_from_response = await self._federation_client.get_pdu(
- [destination],
+ pulled_pdu_info = await self._federation_client.get_pdu(
+ destinations,
event_id,
room_version,
)
- if not event_from_response:
+ if not pulled_pdu_info:
raise FederationError(
"ERROR",
404,
- "Unable to find event_id=%s from destination=%s to backfill."
- % (event_id, destination),
+ f"Unable to find event_id={event_id} from remote servers to backfill.",
affected=event_id,
)
@@ -1501,13 +1552,13 @@ async def backfill_event_id(
# and auth events to de-outlier it. This also sets up the necessary
# `state_groups` for the event.
await self._process_pulled_events(
- destination,
- [event_from_response],
+ pulled_pdu_info.pull_origin,
+ [pulled_pdu_info.pdu],
# Prevent notifications going to clients
backfilled=True,
)
- return event_from_response
+ return pulled_pdu_info
@trace
@tag_args
@@ -1530,19 +1581,19 @@ async def _get_events_and_persist(
async def get_event(event_id: str) -> None:
with nested_logging_context(event_id):
try:
- event = await self._federation_client.get_pdu(
+ pulled_pdu_info = await self._federation_client.get_pdu(
[destination],
event_id,
room_version,
)
- if event is None:
+ if pulled_pdu_info is None:
logger.warning(
"Server %s didn't return event %s",
destination,
event_id,
)
return
- events.append(event)
+ events.append(pulled_pdu_info.pdu)
except Exception as e:
logger.warning(
@@ -1686,6 +1737,15 @@ async def prep(event: EventBase) -> None:
except AuthError as e:
logger.warning("Rejecting %r because %s", event, e)
context.rejected = RejectedReason.AUTH_ERROR
+ except EventSizeError as e:
+ if e.unpersistable:
+ # This event is completely unpersistable.
+ raise e
+ # Otherwise, we are somewhat lenient and just persist the event
+ # as rejected, for moderate compatibility with older Synapse
+ # versions.
+ logger.warning("While validating received event %r: %s", event, e)
+ context.rejected = RejectedReason.OVERSIZED_EVENT
events_and_contexts_to_persist.append((event, context))
@@ -1731,6 +1791,16 @@ async def _check_event_auth(
# TODO: use a different rejected reason here?
context.rejected = RejectedReason.AUTH_ERROR
return
+ except EventSizeError as e:
+ if e.unpersistable:
+ # This event is completely unpersistable.
+ raise e
+ # Otherwise, we are somewhat lenient and just persist the event
+ # as rejected, for moderate compatibility with older Synapse
+ # versions.
+ logger.warning("While validating received event %r: %s", event, e)
+ context.rejected = RejectedReason.OVERSIZED_EVENT
+ return
# next, check that we have all of the event's auth events.
#
@@ -2117,8 +2187,8 @@ async def _run_push_actions_and_persist_event(
min_depth,
)
else:
- await self._bulk_push_rule_evaluator.action_for_event_by_user(
- event, context
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ [(event, context)]
)
try:
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 93d09e993961..848e46eb9ba6 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -711,7 +711,7 @@ async def ask_id_server_for_third_party_invite(
inviter_display_name: The current display name of the
inviter.
inviter_avatar_url: The URL of the inviter's avatar.
- id_access_token (str): The access token to authenticate to the identity
+ id_access_token: The access token to authenticate to the identity
server with
Returns:
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 860c82c1107f..9c335e6863f4 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -57,13 +57,7 @@ def __init__(self, hs: "HomeServer"):
self.validator = EventValidator()
self.snapshot_cache: ResponseCache[
Tuple[
- str,
- Optional[StreamToken],
- Optional[StreamToken],
- str,
- Optional[int],
- bool,
- bool,
+ str, Optional[StreamToken], Optional[StreamToken], str, int, bool, bool
]
] = ResponseCache(hs.get_clock(), "initial_sync_cache")
self._event_serializer = hs.get_event_client_serializer()
@@ -154,11 +148,6 @@ async def _snapshot_all_rooms(
public_room_ids = await self.store.get_public_room_ids()
- if pagin_config.limit is not None:
- limit = pagin_config.limit
- else:
- limit = 10
-
serializer_options = SerializeEventConfig(as_client_event=as_client_event)
async def handle_room(event: RoomsForUser) -> None:
@@ -210,7 +199,7 @@ async def handle_room(event: RoomsForUser) -> None:
run_in_background(
self.store.get_recent_events_for_room,
event.room_id,
- limit=limit,
+ limit=pagin_config.limit,
end_token=room_end_token,
),
deferred_room_state,
@@ -360,15 +349,11 @@ async def _room_initial_sync_parted(
member_event_id
)
- limit = pagin_config.limit if pagin_config else None
- if limit is None:
- limit = 10
-
leave_position = await self.store.get_position_for_event(member_event_id)
stream_token = leave_position.to_room_stream_token()
messages, token = await self.store.get_recent_events_for_room(
- room_id, limit=limit, end_token=stream_token
+ room_id, limit=pagin_config.limit, end_token=stream_token
)
messages = await filter_events_for_client(
@@ -420,10 +405,6 @@ async def _room_initial_sync_joined(
now_token = self.hs.get_event_sources().get_current_token()
- limit = pagin_config.limit if pagin_config else None
- if limit is None:
- limit = 10
-
room_members = [
m
for m in current_state.values()
@@ -467,7 +448,7 @@ async def get_receipts() -> List[JsonDict]:
run_in_background(
self.store.get_recent_events_for_room,
room_id,
- limit=limit,
+ limit=pagin_config.limit,
end_token=now_token.room_key,
),
),
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index da1acea2755b..845f6833583d 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -50,6 +50,7 @@
from synapse.events import EventBase, relation_from_event
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
+from synapse.events.utils import maybe_upsert_event_field
from synapse.events.validator import EventValidator
from synapse.handlers.directory import DirectoryHandler
from synapse.logging import opentracing
@@ -59,7 +60,6 @@
from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.state import StateFilter
from synapse.types import (
MutableStateMap,
PersistedEventPosition,
@@ -70,6 +70,7 @@
UserID,
create_requester,
)
+from synapse.types.state import StateFilter
from synapse.util import json_decoder, json_encoder, log_failure, unwrapFirstError
from synapse.util.async_helpers import Linearizer, gather_results
from synapse.util.caches.expiringcache import ExpiringCache
@@ -877,6 +878,36 @@ async def deduplicate_state_event(
return prev_event
return None
+ async def get_event_from_transaction(
+ self,
+ requester: Requester,
+ txn_id: str,
+ room_id: str,
+ ) -> Optional[EventBase]:
+ """For the given transaction ID and room ID, check if there is a matching event.
+ If so, fetch it and return it.
+
+ Args:
+ requester: The requester making the request in the context of which we want
+ to fetch the event.
+ txn_id: The transaction ID.
+ room_id: The room ID.
+
+ Returns:
+ An event if one could be found, None otherwise.
+ """
+ if requester.access_token_id:
+ existing_event_id = await self.store.get_event_id_from_transaction_id(
+ room_id,
+ requester.user.to_string(),
+ requester.access_token_id,
+ txn_id,
+ )
+ if existing_event_id:
+ return await self.store.get_event(existing_event_id)
+
+ return None
+
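+ # Illustrative usage (hypothetical transaction ID):
+ #     event = await self.get_event_from_transaction(
+ #         requester, "m12345", room_id
+ #     )
+ #     if event is None:
+ #         ...  # no duplicate; create and send a new event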
async def create_and_send_nonmember_event(
self,
requester: Requester,
@@ -956,18 +987,17 @@ async def create_and_send_nonmember_event(
# extremities to pile up, which in turn leads to state resolution
# taking longer.
async with self.limiter.queue(event_dict["room_id"]):
- if txn_id and requester.access_token_id:
- existing_event_id = await self.store.get_event_id_from_transaction_id(
- event_dict["room_id"],
- requester.user.to_string(),
- requester.access_token_id,
- txn_id,
+ if txn_id:
+ event = await self.get_event_from_transaction(
+ requester, txn_id, event_dict["room_id"]
)
- if existing_event_id:
- event = await self.store.get_event(existing_event_id)
+ if event:
# we know it was persisted, so must have a stream ordering
assert event.internal_metadata.stream_ordering
- return event, event.internal_metadata.stream_ordering
+ return (
+ event,
+ event.internal_metadata.stream_ordering,
+ )
event, context = await self.create_event(
requester,
@@ -1106,11 +1136,13 @@ async def create_new_client_event(
)
state_events = await self.store.get_events_as_list(state_event_ids)
# Create a StateMap[str]
- state_map = {(e.type, e.state_key): e.event_id for e in state_events}
+ current_state_ids = {
+ (e.type, e.state_key): e.event_id for e in state_events
+ }
# Actually strip down and only use the necessary auth events
auth_event_ids = self._event_auth_handler.compute_auth_events(
event=temp_event,
- current_state_ids=state_map,
+ current_state_ids=current_state_ids,
for_verification=False,
)
@@ -1360,8 +1392,16 @@ async def handle_new_client_event(
else:
try:
validate_event_for_room_version(event)
+ # If we are persisting a batch of events the event(s) needed to auth the
+ # current event may be part of the batch and will not be in the DB yet
+ event_id_to_event = {e.event_id: e for e, _ in events_and_context}
+ batched_auth_events = {}
+ for event_id in event.auth_event_ids():
+ auth_event = event_id_to_event.get(event_id)
+ if auth_event:
+ batched_auth_events[event_id] = auth_event
await self._event_auth_handler.check_auth_rules_from_context(
- event, context
+ event, batched_auth_events
)
except AuthError as err:
logger.warning("Denying new event %r because %s", event, err)
@@ -1390,7 +1430,7 @@ async def handle_new_client_event(
extra_users=extra_users,
),
run_in_background(
- self.cache_joined_hosts_for_event, event, context
+ self.cache_joined_hosts_for_events, events_and_context
).addErrback(
log_failure, "cache_joined_hosts_for_event failed"
),
@@ -1425,17 +1465,9 @@ async def _persist_events(
a room that has been un-partial stated.
"""
- for event, context in events_and_context:
- # Skip push notification actions for historical messages
- # because we don't want to notify people about old history back in time.
- # The historical messages also do not have the proper `context.current_state_ids`
- # and `state_groups` because they have `prev_events` that aren't persisted yet
- # (historical messages persisted in reverse-chronological order).
- if not event.internal_metadata.is_historical():
- with opentracing.start_active_span("calculate_push_actions"):
- await self._bulk_push_rule_evaluator.action_for_event_by_user(
- event, context
- )
+ await self._bulk_push_rule_evaluator.action_for_events_by_user(
+ events_and_context
+ )
try:
# If we're a worker we need to hit out to the master.
@@ -1491,62 +1523,65 @@ async def _persist_events(
await self.store.remove_push_actions_from_staging(event.event_id)
raise
- async def cache_joined_hosts_for_event(
- self, event: EventBase, context: EventContext
+ async def cache_joined_hosts_for_events(
+ self, events_and_context: List[Tuple[EventBase, EventContext]]
) -> None:
- """Precalculate the joined hosts at the event, when using Redis, so that
+ """Precalculate the joined hosts at each of the given events, when using Redis, so that
external federation senders don't have to recalculate it themselves.
"""
- if not self._external_cache.is_enabled():
- return
-
- # If external cache is enabled we should always have this.
- assert self._external_cache_joined_hosts_updates is not None
+ for event, _ in events_and_context:
+ if not self._external_cache.is_enabled():
+ return
- # We actually store two mappings, event ID -> prev state group,
- # state group -> joined hosts, which is much more space efficient
- # than event ID -> joined hosts.
- #
- # Note: We have to cache event ID -> prev state group, as we don't
- # store that in the DB.
- #
- # Note: We set the state group -> joined hosts cache if it hasn't been
- # set for a while, so that the expiry time is reset.
+ # If external cache is enabled we should always have this.
+ assert self._external_cache_joined_hosts_updates is not None
- state_entry = await self.state.resolve_state_groups_for_events(
- event.room_id, event_ids=event.prev_event_ids()
- )
+ # We actually store two mappings, event ID -> prev state group,
+ # state group -> joined hosts, which is much more space efficient
+ # than event ID -> joined hosts.
+ #
+ # Note: We have to cache event ID -> prev state group, as we don't
+ # store that in the DB.
+ #
+ # Note: We set the state group -> joined hosts cache if it hasn't been
+ # set for a while, so that the expiry time is reset.
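+ #
+ # E.g. (illustrative values):
+ #     "event_to_prev_state_group"[$event_id] -> 42
+ #     "get_joined_hosts"["42"] -> ["hs1.example", "hs2.example"]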
- if state_entry.state_group:
- await self._external_cache.set(
- "event_to_prev_state_group",
- event.event_id,
- state_entry.state_group,
- expiry_ms=60 * 60 * 1000,
+ state_entry = await self.state.resolve_state_groups_for_events(
+ event.room_id, event_ids=event.prev_event_ids()
)
- if state_entry.state_group in self._external_cache_joined_hosts_updates:
- return
+ if state_entry.state_group:
+ await self._external_cache.set(
+ "event_to_prev_state_group",
+ event.event_id,
+ state_entry.state_group,
+ expiry_ms=60 * 60 * 1000,
+ )
- state = await state_entry.get_state(
- self._storage_controllers.state, StateFilter.all()
- )
- with opentracing.start_active_span("get_joined_hosts"):
- joined_hosts = await self.store.get_joined_hosts(
- event.room_id, state, state_entry
+ if state_entry.state_group in self._external_cache_joined_hosts_updates:
+ return
+
+ state = await state_entry.get_state(
+ self._storage_controllers.state, StateFilter.all()
)
+ with opentracing.start_active_span("get_joined_hosts"):
+ joined_hosts = await self.store.get_joined_hosts(
+ event.room_id, state, state_entry
+ )
- # Note that the expiry times must be larger than the expiry time in
- # _external_cache_joined_hosts_updates.
- await self._external_cache.set(
- "get_joined_hosts",
- str(state_entry.state_group),
- list(joined_hosts),
- expiry_ms=60 * 60 * 1000,
- )
+ # Note that the expiry times must be larger than the expiry time in
+ # _external_cache_joined_hosts_updates.
+ await self._external_cache.set(
+ "get_joined_hosts",
+ str(state_entry.state_group),
+ list(joined_hosts),
+ expiry_ms=60 * 60 * 1000,
+ )
- self._external_cache_joined_hosts_updates[state_entry.state_group] = None
+ self._external_cache_joined_hosts_updates[
+ state_entry.state_group
+ ] = None
async def _validate_canonical_alias(
self,
@@ -1705,12 +1740,15 @@ async def persist_and_notify_client_events(
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.INVITE:
- event.unsigned[
- "invite_room_state"
- ] = await self.store.get_stripped_room_state_from_event_context(
- context,
- self.room_prejoin_state_types,
- membership_user_id=event.sender,
+ maybe_upsert_event_field(
+ event,
+ event.unsigned,
+ "invite_room_state",
+ await self.store.get_stripped_room_state_from_event_context(
+ context,
+ self.room_prejoin_state_types,
+ membership_user_id=event.sender,
+ ),
)
invitee = UserID.from_string(event.state_key)
@@ -1728,11 +1766,14 @@ async def persist_and_notify_client_events(
event.signatures.update(returned_invite.signatures)
if event.content["membership"] == Membership.KNOCK:
- event.unsigned[
- "knock_room_state"
- ] = await self.store.get_stripped_room_state_from_event_context(
- context,
- self.room_prejoin_state_types,
+ maybe_upsert_event_field(
+ event,
+ event.unsigned,
+ "knock_room_state",
+ await self.store.get_stripped_room_state_from_event_context(
+ context,
+ self.room_prejoin_state_types,
+ ),
)
if event.type == EventTypes.Redaction:
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index d7a82269006a..03de6a4ba637 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -12,14 +12,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import binascii
import inspect
+import json
import logging
-from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Generic,
+ List,
+ Optional,
+ Type,
+ TypeVar,
+ Union,
+)
from urllib.parse import urlencode, urlparse
import attr
+import unpaddedbase64
from authlib.common.security import generate_token
-from authlib.jose import JsonWebToken, jwt
+from authlib.jose import JsonWebToken, JWTClaims
+from authlib.jose.errors import InvalidClaimError, JoseError, MissingClaimError
from authlib.oauth2.auth import ClientAuth
from authlib.oauth2.rfc6749.parameters import prepare_grant_uri
from authlib.oidc.core import CodeIDToken, UserInfo
@@ -35,9 +49,12 @@
from twisted.web.client import readBody
from twisted.web.http_headers import Headers
+from synapse.api.errors import SynapseError
from synapse.config import ConfigError
from synapse.config.oidc import OidcProviderClientSecretJwtKey, OidcProviderConfig
from synapse.handlers.sso import MappingException, UserAttributes
+from synapse.http.server import finish_request
+from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable
from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart
@@ -88,6 +105,8 @@ class Token(TypedDict):
#: there is no real point of doing this in our case.
JWK = Dict[str, str]
+C = TypeVar("C")
+
#: A JWK Set, as per RFC7517 sec 5.
class JWKS(TypedDict):
@@ -247,6 +266,80 @@ async def handle_oidc_callback(self, request: SynapseRequest) -> None:
await oidc_provider.handle_oidc_callback(request, session_data, code)
+ async def handle_backchannel_logout(self, request: SynapseRequest) -> None:
+ """Handle an incoming request to /_synapse/client/oidc/backchannel_logout
+
+ This extracts the logout_token from the request and tries to figure out
+ which OpenID Provider it is coming from. This works by matching the iss claim
+ with the issuer and the aud claim with the client_id.
+
+ Since at this point we don't know who signed the JWT, we can't just
+ decode it using authlib, since that always verifies the signature. We
+ have to decode it manually without validating the signature. The actual JWT
+ verification is done in the `OidcProvider.handle_backchannel_logout` method,
+ once we have figured out which provider sent the request.
+
+ Args:
+ request: the incoming request from the browser.
+ """
+ logout_token = parse_string(request, "logout_token")
+ if logout_token is None:
+ raise SynapseError(400, "Missing logout_token in request")
+
+ # A JWT looks like this:
+ # header.payload.signature
+ # where all parts are encoded with urlsafe base64.
+ # The aud and iss claims we care about are in the payload part, which
+ # is a JSON object.
+ try:
+ # By destructuring the list after splitting, we ensure that we have
+ # exactly 3 segments
+ _, payload, _ = logout_token.split(".")
+ except ValueError:
+ raise SynapseError(400, "Invalid logout_token in request")
+
+ try:
+ payload_bytes = unpaddedbase64.decode_base64(payload)
+ claims = json_decoder.decode(payload_bytes.decode("utf-8"))
+ except (json.JSONDecodeError, binascii.Error, UnicodeError):
+ raise SynapseError(400, "Invalid logout_token payload in request")
+
+ try:
+ # Let's extract the iss and aud claims
+ iss = claims["iss"]
+ aud = claims["aud"]
+ # The aud claim can be either a string or a list of strings. Here we
+ # normalize it as a list of strings.
+ if isinstance(aud, str):
+ aud = [aud]
+
+ # Check that we have the right types for the aud and the iss claims
+ if not isinstance(iss, str) or not isinstance(aud, list):
+ raise TypeError()
+ for a in aud:
+ if not isinstance(a, str):
+ raise TypeError()
+
+ # At this point we have properly checked both claim types
+ issuer: str = iss
+ audience: List[str] = aud
+ except (TypeError, KeyError):
+ raise SynapseError(400, "Invalid issuer/audience in logout_token")
+
+ # Now that we know the audience and the issuer, we can figure out
+ # which provider it is coming from
+ oidc_provider: Optional[OidcProvider] = None
+ for provider in self._providers.values():
+ if provider.issuer == issuer and provider.client_id in audience:
+ oidc_provider = provider
+ break
+
+ if oidc_provider is None:
+ raise SynapseError(400, "Could not find the OP that issued this logout token")
+
+ # Ask the provider to handle the logout request.
+ await oidc_provider.handle_backchannel_logout(request, logout_token)
+
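
Decoding the payload without verifying the signature, as the handler above does, needs only base64 and JSON. A standalone sketch using the stdlib (the handler itself uses `unpaddedbase64` and Synapse's JSON decoder):

```python
# Standalone sketch of peeking at a JWT's payload without verifying its
# signature, mirroring the iss/aud extraction above. Uses only the stdlib,
# restoring the base64 padding that JWTs omit.
import base64
import json
from typing import List, Tuple

def peek_logout_token(logout_token: str) -> Tuple[str, List[str]]:
    _, payload, _ = logout_token.split(".")  # raises ValueError if not 3 parts
    padded = payload + "=" * (-len(payload) % 4)  # restore base64 padding
    claims = json.loads(base64.urlsafe_b64decode(padded))
    iss = claims["iss"]
    aud = claims["aud"]
    if isinstance(aud, str):
        aud = [aud]
    if not isinstance(iss, str) or not all(isinstance(a, str) for a in aud):
        raise TypeError("invalid iss/aud claims")
    return iss, aud
```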
class OidcError(Exception):
"""Used to catch errors when calling the token_endpoint"""
@@ -275,6 +368,7 @@ def __init__(
provider: OidcProviderConfig,
):
self._store = hs.get_datastores().main
+ self._clock = hs.get_clock()
self._macaroon_generaton = macaroon_generator
@@ -341,6 +435,7 @@ def __init__(
self.idp_brand = provider.idp_brand
self._sso_handler = hs.get_sso_handler()
+ self._device_handler = hs.get_device_handler()
self._sso_handler.register_identity_provider(self)
@@ -399,6 +494,41 @@ def _validate_metadata(self, m: OpenIDProviderMetadata) -> None:
# If we're not using userinfo, we need a valid jwks to validate the ID token
m.validate_jwks_uri()
+ if self._config.backchannel_logout_enabled:
+ if not m.get("backchannel_logout_supported", False):
+ logger.warning(
+ "OIDC Back-Channel Logout is enabled for issuer %r"
+ "but it does not advertise support for it",
+ self.issuer,
+ )
+
+ elif not m.get("backchannel_logout_session_supported", False):
+ logger.warning(
+ "OIDC Back-Channel Logout is enabled and supported "
+ "by issuer %r but it might not send a session ID with "
+ "logout tokens, which is required for the logouts to work",
+ self.issuer,
+ )
+
+ if not self._config.backchannel_logout_ignore_sub:
+ # If OIDC backchannel logouts are enabled, the user mapping provider
+ # should use the `sub` claim. We verify that by mapping a dummy user
+ # and checking that we get the sub claim back
+ user = UserInfo({"sub": "thisisasubject"})
+ try:
+ subject = self._user_mapping_provider.get_remote_user_id(user)
+ if subject != user["sub"]:
+ raise ValueError("Unexpected subject")
+ except Exception:
+ logger.warning(
+ f"OIDC Back-Channel Logout is enabled for issuer {self.issuer!r} "
+ "but it looks like the configured `user_mapping_provider` "
+ "does not use the `sub` claim as subject. If it is the case, "
+ "and you want Synapse to ignore the `sub` claim in OIDC "
+ "Back-Channel Logouts, set `backchannel_logout_ignore_sub` "
+ "to `true` in the issuer config."
+ )
+
@property
def _uses_userinfo(self) -> bool:
"""Returns True if the ``userinfo_endpoint`` should be used.
@@ -414,6 +544,16 @@ def _uses_userinfo(self) -> bool:
or self._user_profile_method == "userinfo_endpoint"
)
+ @property
+ def issuer(self) -> str:
+ """The issuer identifying this provider."""
+ return self._config.issuer
+
+ @property
+ def client_id(self) -> str:
+ """The client_id used when interacting with this provider."""
+ return self._config.client_id
+
async def load_metadata(self, force: bool = False) -> OpenIDProviderMetadata:
"""Return the provider metadata.
@@ -647,7 +787,7 @@ async def _fetch_userinfo(self, token: Token) -> UserInfo:
Must include an ``access_token`` field.
Returns:
- UserInfo: an object representing the user.
+ an object representing the user.
"""
logger.debug("Using the OAuth2 access_token to request userinfo")
metadata = await self.load_metadata()
@@ -661,61 +801,99 @@ async def _fetch_userinfo(self, token: Token) -> UserInfo:
return UserInfo(resp)
- async def _parse_id_token(self, token: Token, nonce: str) -> CodeIDToken:
- """Return an instance of UserInfo from token's ``id_token``.
+ async def _verify_jwt(
+ self,
+ alg_values: List[str],
+ token: str,
+ claims_cls: Type[C],
+ claims_options: Optional[dict] = None,
+ claims_params: Optional[dict] = None,
+ ) -> C:
+ """Decode and validate a JWT, re-fetching the JWKS as needed.
Args:
- token: the token given by the ``token_endpoint``.
- Must include an ``id_token`` field.
- nonce: the nonce value originally sent in the initial authorization
- request. This value should match the one inside the token.
+ alg_values: list of `alg` values allowed when verifying the JWT.
+ token: the JWT.
+ claims_cls: the JWTClaims class to use to validate the claims.
+ claims_options: dict of options passed to the `claims_cls` constructor.
+ claims_params: dict of params passed to the `claims_cls` constructor.
Returns:
- The decoded claims in the ID token.
+ The decoded claims in the JWT.
"""
- metadata = await self.load_metadata()
- claims_params = {
- "nonce": nonce,
- "client_id": self._client_auth.client_id,
- }
- if "access_token" in token:
- # If we got an `access_token`, there should be an `at_hash` claim
- # in the `id_token` that we can check against.
- claims_params["access_token"] = token["access_token"]
-
- alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
jwt = JsonWebToken(alg_values)
- claim_options = {"iss": {"values": [metadata["issuer"]]}}
-
- id_token = token["id_token"]
- logger.debug("Attempting to decode JWT id_token %r", id_token)
+ logger.debug("Attempting to decode JWT (%s) %r", claims_cls.__name__, token)
# Try to decode the keys in cache first, then retry by forcing the keys
# to be reloaded
jwk_set = await self.load_jwks()
try:
claims = jwt.decode(
- id_token,
+ token,
key=jwk_set,
- claims_cls=CodeIDToken,
- claims_options=claim_options,
+ claims_cls=claims_cls,
+ claims_options=claims_options,
claims_params=claims_params,
)
except ValueError:
logger.info("Reloading JWKS after decode error")
jwk_set = await self.load_jwks(force=True) # try reloading the jwks
claims = jwt.decode(
- id_token,
+ token,
key=jwk_set,
- claims_cls=CodeIDToken,
- claims_options=claim_options,
+ claims_cls=claims_cls,
+ claims_options=claims_options,
claims_params=claims_params,
)
- logger.debug("Decoded id_token JWT %r; validating", claims)
+ logger.debug("Decoded JWT (%s) %r; validating", claims_cls.__name__, claims)
- claims.validate(leeway=120) # allows 2 min of clock skew
+ claims.validate(
+ now=self._clock.time(), leeway=120
+ ) # allows 2 min of clock skew
+ return claims
+
+ async def _parse_id_token(self, token: Token, nonce: str) -> CodeIDToken:
+ """Return an instance of UserInfo from token's ``id_token``.
+
+ Args:
+ token: the token given by the ``token_endpoint``.
+ Must include an ``id_token`` field.
+ nonce: the nonce value originally sent in the initial authorization
+ request. This value should match the one inside the token.
+
+ Returns:
+ The decoded claims in the ID token.
+ """
+ id_token = token.get("id_token")
+
+ # That has theoretically been checked by the caller, so even though
+ # assertions are not enabled in production, it is mainly here to appease mypy
+ assert id_token is not None
+
+ metadata = await self.load_metadata()
+
+ claims_params = {
+ "nonce": nonce,
+ "client_id": self._client_auth.client_id,
+ }
+ if "access_token" in token:
+ # If we got an `access_token`, there should be an `at_hash` claim
+ # in the `id_token` that we can check against.
+ claims_params["access_token"] = token["access_token"]
+
+ claims_options = {"iss": {"values": [metadata["issuer"]]}}
+
+ alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
+
+ claims = await self._verify_jwt(
+ alg_values=alg_values,
+ token=id_token,
+ claims_cls=CodeIDToken,
+ claims_options=claims_options,
+ claims_params=claims_params,
+ )
return claims
@@ -1036,6 +1214,146 @@ def _remote_id_from_userinfo(self, userinfo: UserInfo) -> str:
# to be strings.
return str(remote_user_id)
+ async def handle_backchannel_logout(
+ self, request: SynapseRequest, logout_token: str
+ ) -> None:
+ """Handle an incoming request to /_synapse/client/oidc/backchannel_logout
+
+ The OIDC Provider posts a logout token to this endpoint when a user
+ session ends. That token is a JWT signed with the same keys as
+ ID tokens. The OpenID Connect Back-Channel Logout draft explains how to
+ validate the JWT and figure out what session to end.
+
+ Args:
+ request: The request to respond to
+ logout_token: The logout token (a JWT) extracted from the request body
+ """
+ # Back-Channel Logout can be disabled in the config, hence this check.
+ # This is not that important for now since Synapse is registered
+ # manually to the OP, so not specifying the backchannel-logout URI is
+ # as effective as disabling it here. It might make more sense if we
+ # support dynamic registration in Synapse at some point.
+ if not self._config.backchannel_logout_enabled:
+ logger.warning(
+ f"Received an OIDC Back-Channel Logout request from issuer {self.issuer!r} but it is disabled in config"
+ )
+
+ # TODO: this responds with a 400 status code, which is what the OIDC
+ # Back-Channel Logout spec expects, but the spec also suggests answering with
+ # a JSON object, with the `error` and `error_description` fields set, which
+ # we are not doing here.
+ # See https://openid.net/specs/openid-connect-backchannel-1_0.html#BCResponse
+ raise SynapseError(
+ 400, "OpenID Connect Back-Channel Logout is disabled for this provider"
+ )
+
+ metadata = await self.load_metadata()
+
+ # As per OIDC Back-Channel Logout 1.0 sec. 2.4:
+ # A Logout Token MUST be signed and MAY also be encrypted. The same
+ # keys are used to sign and encrypt Logout Tokens as are used for ID
+ # Tokens. If the Logout Token is encrypted, it SHOULD replicate the
+ # iss (issuer) claim in the JWT Header Parameters, as specified in
+ # Section 5.3 of [JWT].
+ alg_values = metadata.get("id_token_signing_alg_values_supported", ["RS256"])
+
+ # As per sec. 2.6:
+ # 3. Validate the iss, aud, and iat Claims in the same way they are
+ # validated in ID Tokens.
+ # Which means the audience should contain Synapse's client_id and the
+ # issuer should be the IdP issuer
+ claims_options = {
+ "iss": {"values": [metadata["issuer"]]},
+ "aud": {"values": [self.client_id]},
+ }
+
+ try:
+ claims = await self._verify_jwt(
+ alg_values=alg_values,
+ token=logout_token,
+ claims_cls=LogoutToken,
+ claims_options=claims_options,
+ )
+ except JoseError:
+ logger.exception("Invalid logout_token")
+ raise SynapseError(400, "Invalid logout_token")
+
+ # As per sec. 2.6:
+ # 4. Verify that the Logout Token contains a sub Claim, a sid Claim,
+ # or both.
+ # 5. Verify that the Logout Token contains an events Claim whose
+ # value is a JSON object containing the member name
+ # http://schemas.openid.net/event/backchannel-logout.
+ # 6. Verify that the Logout Token does not contain a nonce Claim.
+ # This is all verified by the LogoutToken claims class, so at this
+ # point the `sid` claim exists and is a string.
+ sid: str = claims.get("sid")
+
+ # If the `sub` claim was included in the logout token, we check that it
+ # matches the right user. We can have cases where the `sub` claim is not
+ # the ID saved in the database, so we let admins disable this check in config.
+ sub: Optional[str] = claims.get("sub")
+ expected_user_id: Optional[str] = None
+ if sub is not None and not self._config.backchannel_logout_ignore_sub:
+ expected_user_id = await self._store.get_user_by_external_id(
+ self.idp_id, sub
+ )
+
+ # Invalidate any running user-mapping sessions, in-flight login tokens and
+ # active devices
+ await self._sso_handler.revoke_sessions_for_provider_session_id(
+ auth_provider_id=self.idp_id,
+ auth_provider_session_id=sid,
+ expected_user_id=expected_user_id,
+ )
+
+ request.setResponseCode(200)
+ request.setHeader(b"Cache-Control", b"no-cache, no-store")
+ request.setHeader(b"Pragma", b"no-cache")
+ finish_request(request)
+
+
+class LogoutToken(JWTClaims):
+ """
+ Holds and verifies the claims of a logout token, as per
+ https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken
+ """
+
+ REGISTERED_CLAIMS = ["iss", "sub", "aud", "iat", "jti", "events", "sid"]
+
+ def validate(self, now: Optional[int] = None, leeway: int = 0) -> None:
+ """Validate everything in claims payload."""
+ super().validate(now, leeway)
+ self.validate_sid()
+ self.validate_events()
+ self.validate_nonce()
+
+ def validate_sid(self) -> None:
+ """Ensure the sid claim is present"""
+ sid = self.get("sid")
+ if not sid:
+ raise MissingClaimError("sid")
+
+ if not isinstance(sid, str):
+ raise InvalidClaimError("sid")
+
+ def validate_nonce(self) -> None:
+ """Ensure the nonce claim is absent"""
+ if "nonce" in self:
+ raise InvalidClaimError("nonce")
+
+ def validate_events(self) -> None:
+ """Ensure the events claim is present and with the right value"""
+ events = self.get("events")
+ if not events:
+ raise MissingClaimError("events")
+
+ if not isinstance(events, dict):
+ raise InvalidClaimError("events")
+
+ if "http://schemas.openid.net/event/backchannel-logout" not in events:
+ raise InvalidClaimError("events")
+
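
For reference, a minimal claims payload that would pass the `LogoutToken` checks above: `sid` present and a string, the back-channel-logout member present in `events`, and no `nonce`. Every value below is a placeholder.

```python
# Minimal claims payload satisfying the LogoutToken validation above.
# All values are placeholders; iss/aud must also match the provider's
# issuer and Synapse's client_id when verified in context.
minimal_logout_claims = {
    "iss": "https://issuer.example.com",
    "aud": "synapse-client-id",
    "iat": 1_600_000_000,
    "jti": "some-unique-token-id",
    "sid": "provider-session-id",
    "events": {"http://schemas.openid.net/event/backchannel-logout": {}},
}
```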
# number of seconds a newly-generated client secret should be valid for
CLIENT_SECRET_VALIDITY_SECONDS = 3600
@@ -1105,6 +1423,7 @@ def _get_secret(self) -> bytes:
logger.info(
"Generating new JWT for %s: %s %s", self._oauth_issuer, header, payload
)
+ jwt = JsonWebToken(header["alg"])
self._cached_secret = jwt.encode(header, payload, self._key.key)
self._cached_secret_replacement_time = (
expires_at - CLIENT_SECRET_MIN_VALIDITY_SECONDS
@@ -1116,12 +1435,10 @@ class UserAttributeDict(TypedDict):
localpart: Optional[str]
confirm_localpart: bool
display_name: Optional[str]
+ picture: Optional[str] # may be omitted by older `OidcMappingProviders`
emails: List[str]
-C = TypeVar("C")
-
-
class OidcMappingProvider(Generic[C]):
"""A mapping provider maps a UserInfo object to user attributes.
@@ -1204,6 +1521,7 @@ def jinja_finalize(thing: Any) -> Any:
@attr.s(slots=True, frozen=True, auto_attribs=True)
class JinjaOidcMappingConfig:
subject_claim: str
+ picture_claim: str
localpart_template: Optional[Template]
display_name_template: Optional[Template]
email_template: Optional[Template]
@@ -1223,6 +1541,7 @@ def __init__(self, config: JinjaOidcMappingConfig):
@staticmethod
def parse_config(config: dict) -> JinjaOidcMappingConfig:
subject_claim = config.get("subject_claim", "sub")
+ picture_claim = config.get("picture_claim", "picture")
def parse_template_config(option_name: str) -> Optional[Template]:
if option_name not in config:
@@ -1256,6 +1575,7 @@ def parse_template_config(option_name: str) -> Optional[Template]:
return JinjaOidcMappingConfig(
subject_claim=subject_claim,
+ picture_claim=picture_claim,
localpart_template=localpart_template,
display_name_template=display_name_template,
email_template=email_template,
@@ -1295,10 +1615,13 @@ def render_template_field(template: Optional[Template]) -> Optional[str]:
if email:
emails.append(email)
+ picture = userinfo.get("picture")
+
return UserAttributeDict(
localpart=localpart,
display_name=display_name,
emails=emails,
+ picture=picture,
confirm_localpart=self._config.confirm_localpart,
)
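
The new `picture_claim` option falls back to the standard OIDC `picture` claim. The lookup is just a keyed `dict.get` with a default, shown here in isolation:

```python
# The picture_claim fallback above, in isolation: absent from the config,
# it defaults to the standard OIDC "picture" claim.
def picture_claim_from_config(config: dict) -> str:
    return config.get("picture_claim", "picture")

assert picture_claim_from_config({}) == "picture"
assert picture_claim_from_config({"picture_claim": "avatar_url"}) == "avatar_url"
```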
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 1f83bab836e6..8c8ff18a1a61 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -27,9 +27,9 @@
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.admin._base import assert_user_is_admin
-from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StreamKeyType
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock
from synapse.util.stringutils import random_string
from synapse.visibility import filter_events_for_client
@@ -448,6 +448,12 @@ async def get_messages(
if pagin_config.from_token:
from_token = pagin_config.from_token
+ elif pagin_config.direction == "f":
+ from_token = (
+ await self.hs.get_event_sources().get_start_token_for_pagination(
+ room_id
+ )
+ )
else:
from_token = (
await self.hs.get_event_sources().get_current_token_for_pagination(
@@ -458,11 +464,6 @@ async def get_messages(
# `/messages` should still work with live tokens when manually provided.
assert from_token.room_key.topological is not None
- if pagin_config.limit is None:
- # This shouldn't happen as we've set a default limit before this
- # gets called.
- raise Exception("limit not set")
-
room_token = from_token.room_key
async with self.pagination_lock.read(room_id):
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 4e575ffbaa8d..2af90b25a39c 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -201,7 +201,7 @@ async def current_state_for_users(
"""Get the current presence state for multiple users.
Returns:
- dict: `user_id` -> `UserPresenceState`
+ A mapping of `user_id` -> `UserPresenceState`
"""
states = {}
missing = []
@@ -256,7 +256,7 @@ async def bump_presence_active_time(self, user: UserID) -> None:
with the app.
"""
- async def update_external_syncs_row(
+ async def update_external_syncs_row( # noqa: B027 (no-op by design)
self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int
) -> None:
"""Update the syncing users for an external process as a delta.
@@ -272,7 +272,9 @@ async def update_external_syncs_row(
sync_time_msec: Time in ms when the user was last syncing
"""
- async def update_external_syncs_clear(self, process_id: str) -> None:
+ async def update_external_syncs_clear( # noqa: B027 (no-op by design)
+ self, process_id: str
+ ) -> None:
"""Marks all users that had been marked as syncing by a given process
as offline.
@@ -476,7 +478,7 @@ async def user_syncing(
return _NullContextManager()
prev_state = await self.current_state_for_user(user_id)
- if prev_state != PresenceState.BUSY:
+ if prev_state.state != PresenceState.BUSY:
# We set state here but pass ignore_status_msg = True as we don't want to
# cause the status message to be cleared.
# Note that this causes last_active_ts to be incremented which is not
@@ -1596,7 +1598,9 @@ async def get_new_events(
self,
user: UserID,
from_key: Optional[int],
- limit: Optional[int] = None,
+ # Having a default limit doesn't match the EventSource API, but some
+ # callers do not provide it. It is unused in this class.
+ limit: int = 0,
room_ids: Optional[Collection[str]] = None,
is_guest: bool = False,
explicit_room_id: Optional[str] = None,
@@ -1688,10 +1692,12 @@ async def get_new_events(
if from_key is not None:
# First get all users that have had a presence update
- updated_users = stream_change_cache.get_all_entities_changed(from_key)
+ result = stream_change_cache.get_all_entities_changed(from_key)
# Cross-reference users we're interested in with those that have had updates.
- if updated_users is not None:
+ if result.hit:
+ updated_users = result.entities
+
# If we have the full list of changes for presence we can
# simply check which ones share a room with the user.
get_updates_counter.labels("stream").inc()
@@ -1760,14 +1766,14 @@ async def _filter_all_presence_updates_for_user(
Returns:
A list of presence states for the given user to receive.
"""
+ updated_users = None
if from_key:
# Only return updates since the last sync
- updated_users = self.store.presence_stream_cache.get_all_entities_changed(
- from_key
- )
- if not updated_users:
- updated_users = []
+ result = self.store.presence_stream_cache.get_all_entities_changed(from_key)
+ if result.hit:
+ updated_users = result.entities
+ if updated_users is not None:
# Get the actual presence update for each change
users_to_state = await self.get_presence_handler().current_state_for_users(
updated_users
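
Both presence hunks above switch from an `Optional` return to a result object with `hit` and `entities` fields. A sketch of how callers distinguish "the cache knows the full set of changes" from "the cache cannot answer"; only the field names come from the diff, the class shape is assumed:

```python
# Sketch of the "all entities changed" result type implied by the diff:
# `hit` says whether the cache could answer at all, and `entities` is only
# meaningful when it could. The dataclass itself is an assumption; only the
# .hit / .entities field names come from the code above.
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class AllEntitiesChangedResult:
    hit: bool
    entities: List[str] = field(default_factory=list)

def users_to_consider(result: AllEntitiesChangedResult) -> Optional[List[str]]:
    if result.hit:
        return result.entities  # may legitimately be empty
    return None  # cache couldn't answer: caller must assume everyone changed
```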
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index d8ff5289b56f..4bf9a047a3bc 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -307,7 +307,11 @@ async def check_avatar_size_and_mime_type(self, mxc: str) -> bool:
if not self.max_avatar_size and not self.allowed_avatar_mimetypes:
return True
- server_name, _, media_id = parse_and_validate_mxc_uri(mxc)
+ host, port, media_id = parse_and_validate_mxc_uri(mxc)
+ if port is not None:
+ server_name = host + ":" + str(port)
+ else:
+ server_name = host
if server_name == self.server_name:
media_info = await self.store.get_local_media(media_id)
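
`parse_and_validate_mxc_uri` now yields the host and an optional port separately, so the server name has to be reassembled before comparing it against the local server name. The reassembly in isolation:

```python
# Standalone version of the host/port reassembly above: an MXC URI's
# authority may carry an explicit port, which must be kept when comparing
# against the local server name.
from typing import Optional

def join_server_name(host: str, port: Optional[int]) -> str:
    return host if port is None else f"{host}:{port}"

assert join_server_name("matrix.org", None) == "matrix.org"
assert join_server_name("matrix.org", 8448) == "matrix.org:8448"
```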
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 4a7ec9e42670..6a4fed115671 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -92,7 +92,6 @@ async def _received_remote_receipt(self, origin: str, content: JsonDict) -> None
continue
# Check if these receipts apply to a thread.
- thread_id = None
data = user_values.get("data", {})
thread_id = data.get("thread_id")
# If the thread ID is invalid, consider it missing.
@@ -257,7 +256,7 @@ async def get_new_events(
self,
user: UserID,
from_key: int,
- limit: Optional[int],
+ limit: int,
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index ca1c7a18667e..c611efb760e4 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -38,6 +38,7 @@
)
from synapse.appservice import ApplicationService
from synapse.config.server import is_threepid_reserved
+from synapse.handlers.device import DeviceHandler
from synapse.http.servlet import assert_params_in_dict
from synapse.replication.http.login import RegisterDeviceReplicationServlet
from synapse.replication.http.register import (
@@ -45,8 +46,8 @@
ReplicationRegisterServlet,
)
from synapse.spam_checker_api import RegistrationBehaviour
-from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, UserID, create_requester
+from synapse.types.state import StateFilter
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -841,6 +842,9 @@ class and RegisterDeviceReplicationServlet.
refresh_token = None
refresh_token_id = None
+ # This can only run on the main process.
+ assert isinstance(self.device_handler, DeviceHandler)
+
registered_device_id = await self.device_handler.check_device_registered(
user_id,
device_id,
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index 63bc6a7aa55f..e96f9999a8d6 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -11,17 +11,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import enum
import logging
-from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Collection, Dict, FrozenSet, Iterable, List, Optional
import attr
-from synapse.api.constants import RelationTypes
+from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
+from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import trace
-from synapse.storage.databases.main.relations import _RelatedEvent
-from synapse.types import JsonDict, Requester, StreamToken, UserID
+from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent
+from synapse.streams.config import PaginationConfig
+from synapse.types import JsonDict, Requester, UserID
+from synapse.util.async_helpers import gather_results
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
@@ -31,6 +35,13 @@
logger = logging.getLogger(__name__)
+class ThreadsListInclude(str, enum.Enum):
+ """Valid values for the 'include' flag of /threads."""
+
+ all = "all"
+ participated = "participated"
+
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _ThreadAggregation:
# The latest event in the thread.
@@ -66,19 +77,17 @@ def __init__(self, hs: "HomeServer"):
self._clock = hs.get_clock()
self._event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
+ self._event_creation_handler = hs.get_event_creation_handler()
async def get_relations(
self,
requester: Requester,
event_id: str,
room_id: str,
+ pagin_config: PaginationConfig,
+ include_original_event: bool,
relation_type: Optional[str] = None,
event_type: Optional[str] = None,
- limit: int = 5,
- direction: str = "b",
- from_token: Optional[StreamToken] = None,
- to_token: Optional[StreamToken] = None,
- include_original_event: bool = False,
) -> JsonDict:
"""Get related events of a event, ordered by topological ordering.
@@ -88,14 +97,10 @@ async def get_relations(
requester: The user requesting the relations.
event_id: Fetch events that relate to this event ID.
room_id: The room the event belongs to.
+ pagin_config: The pagination config rules to apply, if any.
+ include_original_event: Whether to include the parent event.
relation_type: Only fetch events with this relation type, if given.
event_type: Only fetch events with this event type, if given.
- limit: Only fetch the most recent `limit` events.
- direction: Whether to fetch the most recent first (`"b"`) or the
- oldest first (`"f"`).
- from_token: Fetch rows from the given token, or from the start if None.
- to_token: Fetch rows up to the given token, or up to the end if None.
- include_original_event: Whether to include the parent event.
Returns:
The pagination chunk.
@@ -123,10 +128,10 @@ async def get_relations(
room_id=room_id,
relation_type=relation_type,
event_type=event_type,
- limit=limit,
- direction=direction,
- from_token=from_token,
- to_token=to_token,
+ limit=pagin_config.limit,
+ direction=pagin_config.direction,
+ from_token=pagin_config.from_token,
+ to_token=pagin_config.to_token,
)
events = await self._main_store.get_events_as_list(
@@ -162,90 +167,167 @@ async def get_relations(
if next_token:
return_value["next_batch"] = await next_token.to_string(self._main_store)
- if from_token:
- return_value["prev_batch"] = await from_token.to_string(self._main_store)
+ if pagin_config.from_token:
+ return_value["prev_batch"] = await pagin_config.from_token.to_string(
+ self._main_store
+ )
return return_value
- async def get_relations_for_event(
+ async def redact_events_related_to(
self,
+ requester: Requester,
event_id: str,
- event: EventBase,
- room_id: str,
- relation_type: str,
- ignored_users: FrozenSet[str] = frozenset(),
- ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]:
- """Get a list of events which relate to an event, ordered by topological ordering.
+ initial_redaction_event: EventBase,
+ relation_types: List[str],
+ ) -> None:
+ """Redacts all events related to the given event ID with one of the given
+ relation types.
- Args:
- event_id: Fetch events that relate to this event ID.
- event: The matching EventBase to event_id.
- room_id: The room the event belongs to.
- relation_type: The type of relation.
- ignored_users: The users ignored by the requesting user.
+ This method is expected to be called when redacting the event referred to by
+ the given event ID.
- Returns:
- List of event IDs that match relations requested. The rows are of
- the form `{"event_id": "..."}`.
- """
+ If an event cannot be redacted (e.g. because of insufficient permissions), log
+ the error and try to redact the next one.
- # Call the underlying storage method, which is cached.
- related_events, next_token = await self._main_store.get_relations_for_event(
- event_id, event, room_id, relation_type, direction="f"
+ Args:
+ requester: The requester to redact events on behalf of.
+ event_id: The event ID to look up and redact relations of.
+ initial_redaction_event: The redaction for the event referred to by
+ event_id.
+ relation_types: The types of relations to look for.
+
+ Raises:
+ ShadowBanError if the requester is shadow-banned
+ """
+ related_event_ids = (
+ await self._main_store.get_all_relations_for_event_with_types(
+ event_id, relation_types
+ )
)
- # Filter out ignored users and convert to the expected format.
- related_events = [
- event for event in related_events if event.sender not in ignored_users
- ]
-
- return related_events, next_token
+ for related_event_id in related_event_ids:
+ try:
+ await self._event_creation_handler.create_and_send_nonmember_event(
+ requester,
+ {
+ "type": EventTypes.Redaction,
+ "content": initial_redaction_event.content,
+ "room_id": initial_redaction_event.room_id,
+ "sender": requester.user.to_string(),
+ "redacts": related_event_id,
+ },
+ ratelimit=False,
+ )
+ except SynapseError as e:
+ logger.warning(
+ "Failed to redact event %s (related to event %s): %s",
+ related_event_id,
+ event_id,
+ e.msg,
+ )
- async def get_annotations_for_event(
- self,
- event_id: str,
- room_id: str,
- limit: int = 5,
- ignored_users: FrozenSet[str] = frozenset(),
- ) -> List[JsonDict]:
- """Get a list of annotations on the event, grouped by event type and
+ async def get_annotations_for_events(
+ self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
+ ) -> Dict[str, List[JsonDict]]:
+ """Get a list of annotations to the given events, grouped by event type and
aggregation key, sorted by count.
- This is used e.g. to get the what and how many reactions have happend
+ This is used e.g. to get which reactions, and how many of each, have happened
on an event.
Args:
- event_id: Fetch events that relate to this event ID.
- room_id: The room the event belongs to.
- limit: Only fetch the `limit` groups.
+ event_ids: Fetch events that relate to these event IDs.
ignored_users: The users ignored by the requesting user.
Returns:
- List of groups of annotations that match. Each row is a dict with
- `type`, `key` and `count` fields.
+ A map of event IDs to a list of groups of annotations that match.
+ Each entry is a dict with `type`, `key` and `count` fields.
"""
# Get the base results for all users.
- full_results = await self._main_store.get_aggregation_groups_for_event(
- event_id, room_id, limit
+ full_results = await self._main_store.get_aggregation_groups_for_events(
+ event_ids
)
+ # Avoid additional logic if there are no ignored users.
+ if not ignored_users:
+ return {
+ event_id: results
+ for event_id, results in full_results.items()
+ if results
+ }
+
# Then subtract off the results for any ignored users.
ignored_results = await self._main_store.get_aggregation_groups_for_users(
- event_id, room_id, limit, ignored_users
+ [event_id for event_id, results in full_results.items() if results],
+ ignored_users,
)
- filtered_results = []
- for result in full_results:
- key = (result["type"], result["key"])
- if key in ignored_results:
- result = result.copy()
- result["count"] -= ignored_results[key]
- if result["count"] <= 0:
- continue
- filtered_results.append(result)
+ filtered_results = {}
+ for event_id, results in full_results.items():
+ # If no annotations, skip.
+ if not results:
+ continue
+
+ # If there are no ignored results for this event, copy verbatim.
+ if event_id not in ignored_results:
+ filtered_results[event_id] = results
+ continue
+
+ # Otherwise, subtract out the ignored results.
+ event_ignored_results = ignored_results[event_id]
+ for result in results:
+ key = (result["type"], result["key"])
+ if key in event_ignored_results:
+ # Copy the result so we don't modify the cache.
+ result = result.copy()
+ result["count"] -= event_ignored_results[key]
+ if result["count"] <= 0:
+ continue
+ filtered_results.setdefault(event_id, []).append(result)
return filtered_results
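
A worked example of the subtraction above; the result shapes follow the docstring, while the concrete values are invented:

```python
# Worked example of the ignored-user subtraction above. Shapes follow the
# docstring (type/key/count groups per event ID); the values are invented.
full_results = {
    "$event1": [{"type": "m.reaction", "key": "👍", "count": 3}],
    "$event2": [{"type": "m.reaction", "key": "❤", "count": 1}],
}
# Two of the 👍 reactions on $event1 and the only ❤ on $event2 came from
# ignored users.
ignored_results = {
    "$event1": {("m.reaction", "👍"): 2},
    "$event2": {("m.reaction", "❤"): 1},
}

filtered = {}
for event_id, results in full_results.items():
    event_ignored = ignored_results.get(event_id, {})
    for result in results:
        count = result["count"] - event_ignored.get((result["type"], result["key"]), 0)
        if count > 0:
            filtered.setdefault(event_id, []).append({**result, "count": count})

# $event2 drops out entirely; $event1 keeps one visible 👍.
assert filtered == {"$event1": [{"type": "m.reaction", "key": "👍", "count": 1}]}
```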
+ async def get_references_for_events(
+ self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
+ ) -> Dict[str, List[_RelatedEvent]]:
+ """Get a list of references to the given events.
+
+ Args:
+ event_ids: Fetch events that relate to these event IDs.
+ ignored_users: The users ignored by the requesting user.
+
+ Returns:
+ A map of event IDs to a list of related events.
+ """
+
+ related_events = await self._main_store.get_references_for_events(event_ids)
+
+ # Avoid additional logic if there are no ignored users.
+ if not ignored_users:
+ return {
+ event_id: results
+ for event_id, results in related_events.items()
+ if results
+ }
+
+ # Filter out ignored users.
+ results = {}
+ for event_id, events in related_events.items():
+ # If no references, skip.
+ if not events:
+ continue
+
+ # Filter ignored users out.
+ events = [event for event in events if event.sender not in ignored_users]
+ # If there are no events left, skip this event.
+ if not events:
+ continue
+
+ results[event_id] = events
+
+ return results
+
async def _get_threads_for_events(
self,
events_by_id: Dict[str, EventBase],
@@ -308,59 +390,66 @@ async def _get_threads_for_events(
results = {}
for event_id, summary in summaries.items():
- if summary:
- thread_count, latest_thread_event = summary
-
- # Subtract off the count of any ignored users.
- for ignored_user in ignored_users:
- thread_count -= ignored_results.get((event_id, ignored_user), 0)
-
- # This is gnarly, but if the latest event is from an ignored user,
- # attempt to find one that isn't from an ignored user.
- if latest_thread_event.sender in ignored_users:
- room_id = latest_thread_event.room_id
-
- # If the root event is not found, something went wrong, do
- # not include a summary of the thread.
- event = await self._event_handler.get_event(user, room_id, event_id)
- if event is None:
- continue
+ # If no thread, skip.
+ if not summary:
+ continue
- potential_events, _ = await self.get_relations_for_event(
- event_id,
- event,
- room_id,
- RelationTypes.THREAD,
- ignored_users,
- )
+ thread_count, latest_thread_event = summary
- # If all found events are from ignored users, do not include
- # a summary of the thread.
- if not potential_events:
- continue
+ # Subtract off the count of any ignored users.
+ for ignored_user in ignored_users:
+ thread_count -= ignored_results.get((event_id, ignored_user), 0)
- # The *last* event returned is the one that is cared about.
- event = await self._event_handler.get_event(
- user, room_id, potential_events[-1].event_id
- )
- # It is unexpected that the event will not exist.
- if event is None:
- logger.warning(
- "Unable to fetch latest event in a thread with event ID: %s",
- potential_events[-1].event_id,
- )
- continue
- latest_thread_event = event
-
- results[event_id] = _ThreadAggregation(
- latest_event=latest_thread_event,
- count=thread_count,
- # If there's a thread summary it must also exist in the
- # participated dictionary.
- current_user_participated=events_by_id[event_id].sender == user_id
- or participated[event_id],
+ # This is gnarly, but if the latest event is from an ignored user,
+ # attempt to find one that isn't from an ignored user.
+ if latest_thread_event.sender in ignored_users:
+ room_id = latest_thread_event.room_id
+
+ # If the root event is not found, something went wrong, do
+ # not include a summary of the thread.
+ event = await self._event_handler.get_event(user, room_id, event_id)
+ if event is None:
+ continue
+
+ # Attempt to find another event to use as the latest event.
+ potential_events, _ = await self._main_store.get_relations_for_event(
+ event_id, event, room_id, RelationTypes.THREAD, direction="f"
)
+ # Filter out ignored users.
+ potential_events = [
+ event
+ for event in potential_events
+ if event.sender not in ignored_users
+ ]
+
+ # If all found events are from ignored users, do not include
+ # a summary of the thread.
+ if not potential_events:
+ continue
+
+ # The *last* event returned is the one that is cared about.
+ event = await self._event_handler.get_event(
+ user, room_id, potential_events[-1].event_id
+ )
+ # It is unexpected that the event will not exist.
+ if event is None:
+ logger.warning(
+ "Unable to fetch latest event in a thread with event ID: %s",
+ potential_events[-1].event_id,
+ )
+ continue
+ latest_thread_event = event
+
+ results[event_id] = _ThreadAggregation(
+ latest_event=latest_thread_event,
+ count=thread_count,
+ # If there's a thread summary it must also exist in the
+ # participated dictionary.
+ current_user_participated=events_by_id[event_id].sender == user_id
+ or participated[event_id],
+ )
+
return results
@trace
@@ -438,48 +527,131 @@ async def get_bundled_aggregations(
# (as that is what makes it part of the thread).
relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD
- # Fetch other relations per event.
- for event in events_by_id.values():
- # Fetch any annotations (ie, reactions) to bundle with this event.
- annotations = await self.get_annotations_for_event(
- event.event_id, event.room_id, ignored_users=ignored_users
+ async def _fetch_annotations() -> None:
+ """Fetch any annotations (ie, reactions) to bundle with this event."""
+ annotations_by_event_id = await self.get_annotations_for_events(
+ events_by_id.keys(), ignored_users=ignored_users
)
- if annotations:
- results.setdefault(
- event.event_id, BundledAggregations()
- ).annotations = {"chunk": annotations}
-
- # Fetch any references to bundle with this event.
- references, next_token = await self.get_relations_for_event(
- event.event_id,
- event,
- event.room_id,
- RelationTypes.REFERENCE,
- ignored_users=ignored_users,
+ for event_id, annotations in annotations_by_event_id.items():
+ if annotations:
+ results.setdefault(event_id, BundledAggregations()).annotations = {
+ "chunk": annotations
+ }
+
+ async def _fetch_references() -> None:
+ """Fetch any references to bundle with this event."""
+ references_by_event_id = await self.get_references_for_events(
+ events_by_id.keys(), ignored_users=ignored_users
+ )
+ for event_id, references in references_by_event_id.items():
+ if references:
+ results.setdefault(event_id, BundledAggregations()).references = {
+ "chunk": [{"event_id": ev.event_id} for ev in references]
+ }
+
+ async def _fetch_edits() -> None:
+ """
+ Fetch any edits (but not for redacted events).
+
+ Note that there is no use in limiting edits by ignored users since the
+ parent event should be ignored in the first place if the user is ignored.
+ """
+ edits = await self._main_store.get_applicable_edits(
+ [
+ event_id
+ for event_id, event in events_by_id.items()
+ if not event.internal_metadata.is_redacted()
+ ]
+ )
+ for event_id, edit in edits.items():
+ results.setdefault(event_id, BundledAggregations()).replace = edit
+
+ # Parallelize the calls for annotations, references, and edits since they
+ # are unrelated.
+ await make_deferred_yieldable(
+ gather_results(
+ (
+ run_in_background(_fetch_annotations),
+ run_in_background(_fetch_references),
+ run_in_background(_fetch_edits),
+ )
)
- if references:
- aggregations = results.setdefault(event.event_id, BundledAggregations())
- aggregations.references = {
- "chunk": [{"event_id": ev.event_id} for ev in references]
- }
-
- if next_token:
- aggregations.references["next_batch"] = await next_token.to_string(
- self._main_store
- )
-
- # Fetch any edits (but not for redacted events).
- #
- # Note that there is no use in limiting edits by ignored users since the
- # parent event should be ignored in the first place if the user is ignored.
- edits = await self._main_store.get_applicable_edits(
- [
- event_id
- for event_id, event in events_by_id.items()
- if not event.internal_metadata.is_redacted()
- ]
)
- for event_id, edit in edits.items():
- results.setdefault(event_id, BundledAggregations()).replace = edit
return results
+
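
The same fan-out pattern in plain asyncio terms; Synapse itself uses Twisted's `run_in_background`/`gather_results` with logcontext handling, so `asyncio.gather` is only the analogy, and the fetch bodies below are stubs:

```python
# The fan-out above, expressed with asyncio for illustration: the three
# fetches are independent, so they run concurrently and each writes into a
# shared results dict.
import asyncio
from typing import Dict, List

async def fetch_bundles(event_ids: List[str]) -> Dict[str, dict]:
    results: Dict[str, dict] = {}

    async def fetch_annotations() -> None:
        for event_id in event_ids:
            results.setdefault(event_id, {})["annotations"] = []

    async def fetch_references() -> None:
        for event_id in event_ids:
            results.setdefault(event_id, {})["references"] = []

    async def fetch_edits() -> None:
        for event_id in event_ids:
            results.setdefault(event_id, {})["replace"] = None

    await asyncio.gather(fetch_annotations(), fetch_references(), fetch_edits())
    return results

print(asyncio.run(fetch_bundles(["$a", "$b"])))
```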
+ async def get_threads(
+ self,
+ requester: Requester,
+ room_id: str,
+ include: ThreadsListInclude,
+ limit: int = 5,
+ from_token: Optional[ThreadsNextBatch] = None,
+ ) -> JsonDict:
+ """Get related events of a event, ordered by topological ordering.
+
+ Args:
+ requester: The user requesting the threads.
+ room_id: The room the event belongs to.
+ include: One of "all" or "participated" to indicate which threads should
+ be returned.
+ limit: Only fetch the most recent `limit` events.
+ from_token: Fetch rows from the given token, or from the start if None.
+
+ Returns:
+ The pagination chunk.
+ """
+
+ user_id = requester.user.to_string()
+
+ # TODO Properly handle a user leaving a room.
+ (_, member_event_id) = await self._auth.check_user_in_room_or_world_readable(
+ room_id, requester, allow_departed_users=True
+ )
+
+ # Note that ignored users are not passed into get_threads
+ # below. Ignored users are handled in filter_events_for_client (and by
+ # not passing them in here we should get a better cache hit rate).
+ thread_roots, next_batch = await self._main_store.get_threads(
+ room_id=room_id, limit=limit, from_token=from_token
+ )
+
+ events = await self._main_store.get_events_as_list(thread_roots)
+
+ if include == ThreadsListInclude.participated:
+ # Pre-seed thread participation with whether the requester sent the event.
+ participated = {event.event_id: event.sender == user_id for event in events}
+ # For events the requester did not send, check the database for whether
+ # the requester sent a threaded reply.
+ participated.update(
+ await self._main_store.get_threads_participated(
+ [eid for eid, p in participated.items() if not p],
+ user_id,
+ )
+ )
+
+ # Limit the returned threads to those the user has participated in.
+ events = [event for event in events if participated[event.event_id]]
+
+ events = await filter_events_for_client(
+ self._storage_controllers,
+ user_id,
+ events,
+ is_peeking=(member_event_id is None),
+ )
+
+ aggregations = await self.get_bundled_aggregations(
+ events, requester.user.to_string()
+ )
+
+ now = self._clock.time_msec()
+ serialized_events = self._event_serializer.serialize_events(
+ events, now, bundle_aggregations=aggregations
+ )
+
+ return_value: JsonDict = {"chunk": serialized_events}
+
+ if next_batch:
+ return_value["next_batch"] = str(next_batch)
+
+ return return_value
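
The participation pre-seeding above can be illustrated standalone: threads whose root the requester sent count as participated without a lookup, and only the rest hit the database. Here `db_lookup` is a hypothetical stand-in for `get_threads_participated`:

```python
# Standalone illustration of the participation pre-seeding above. db_lookup
# stands in for the get_threads_participated database call and is an
# assumption for this sketch.
from typing import Dict, List

def participated_threads(
    thread_roots: List[dict], user_id: str, db_lookup: Dict[str, bool]
) -> List[dict]:
    # Pre-seed: sending the thread root counts as participating.
    participated = {e["event_id"]: e["sender"] == user_id for e in thread_roots}
    for event_id, p in list(participated.items()):
        if not p:
            participated[event_id] = db_lookup.get(event_id, False)
    return [e for e in thread_roots if participated[e["event_id"]]]

roots = [
    {"event_id": "$root1", "sender": "@alice:example.com"},
    {"event_id": "$root2", "sender": "@bob:example.com"},
]
# Alice replied in $root2's thread, per the (assumed) DB lookup.
assert len(participated_threads(roots, "@alice:example.com", {"$root2": True})) == 2
```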
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 57ab05ad25d9..f81241c2b335 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -49,7 +49,6 @@
from synapse.api.errors import (
AuthError,
Codes,
- HttpResponseException,
LimitExceededError,
NotFoundError,
StoreError,
@@ -60,11 +59,9 @@
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_and_fixup_power_levels_contents
-from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin
-from synapse.storage.state import StateFilter
from synapse.streams import EventSource
from synapse.types import (
JsonDict,
@@ -79,6 +76,7 @@
UserID,
create_requester,
)
+from synapse.types.state import StateFilter
from synapse.util import stringutils
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_and_validate_server_name
@@ -229,9 +227,7 @@ async def upgrade_room(
},
)
validate_event_for_room_version(tombstone_event)
- await self._event_auth_handler.check_auth_rules_from_context(
- tombstone_event, tombstone_context
- )
+ await self._event_auth_handler.check_auth_rules_from_context(tombstone_event)
# Upgrade the room
#
@@ -561,7 +557,6 @@ async def clone_existing_room(
invite_list=[],
initial_state=initial_state,
creation_content=creation_content,
- ratelimit=False,
)
# Transfer membership events
@@ -755,6 +750,10 @@ async def create_room(
)
if ratelimit:
+ # Rate limit once in advance, but don't rate limit the individual
+ # events in the room — room creation isn't atomic and it's very
+ # janky if half the events in the initial state don't make it because
+ # of rate limiting.
await self.request_ratelimiter.ratelimit(requester)
room_version_id = config.get(
@@ -915,7 +914,6 @@ async def create_room(
room_alias=room_alias,
power_level_content_override=power_level_content_override,
creator_join_profile=creator_join_profile,
- ratelimit=ratelimit,
)
if "name" in config:
@@ -1039,7 +1037,6 @@ async def _send_events_for_new_room(
room_alias: Optional[RoomAlias] = None,
power_level_content_override: Optional[JsonDict] = None,
creator_join_profile: Optional[JsonDict] = None,
- ratelimit: bool = True,
) -> Tuple[int, str, int]:
"""Sends the initial events into a new room. Sends the room creation, membership,
and power level events into the room sequentially, then creates and batches up the
@@ -1048,6 +1045,8 @@ async def _send_events_for_new_room(
`power_level_content_override` doesn't apply when initial state has
power level state event content.
+ Rate limiting should already have been applied by this point.
+
Returns:
A tuple containing the stream ID, event ID and depth of the last
event sent to the room.
@@ -1057,9 +1056,6 @@ async def _send_events_for_new_room(
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
depth = 1
- # the last event sent/persisted to the db
- last_sent_event_id: Optional[str] = None
-
# the most recently created event
prev_event: List[str] = []
# a map of event types, state keys -> event_ids. We collect these mappings as events are
@@ -1084,6 +1080,19 @@ async def create_event(
for_batch: bool,
**kwargs: Any,
) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
+ """
+ Creates an event and associated event context.
+ Args:
+ etype: the type of event to be created
+ content: content of the event
+ for_batch: whether the event is being created for batch persisting. If
+ true, this will create the event using prev_event_ids, and will build
+ the event context using state_map and current_state_group, so both of
+ those must be provided when for_batch is True. The resulting event and
+ context are suitable for being batched up and bulk persisted to the
+ database with other similarly created events.
+ """
nonlocal depth
nonlocal prev_event
@@ -1104,26 +1113,6 @@ async def create_event(
return new_event, new_context
- async def send(
- event: EventBase,
- context: synapse.events.snapshot.EventContext,
- creator: Requester,
- ) -> int:
- nonlocal last_sent_event_id
-
- ev = await self.event_creation_handler.handle_new_client_event(
- requester=creator,
- events_and_context=[(event, context)],
- ratelimit=False,
- ignore_shadow_ban=True,
- )
-
- last_sent_event_id = ev.event_id
-
- # we know it was persisted, so must have a stream ordering
- assert ev.internal_metadata.stream_ordering
- return ev.internal_metadata.stream_ordering
-
try:
config = self._presets_dict[preset_config]
except KeyError:
@@ -1137,16 +1126,20 @@ async def send(
)
logger.debug("Sending %s in new room", EventTypes.Member)
- await send(creation_event, creation_context, creator)
+ ev = await self.event_creation_handler.handle_new_client_event(
+ requester=creator,
+ events_and_context=[(creation_event, creation_context)],
+ ratelimit=False,
+ ignore_shadow_ban=True,
+ )
+ last_sent_event_id = ev.event_id
- # Room create event must exist at this point
- assert last_sent_event_id is not None
member_event_id, _ = await self.room_member_handler.update_membership(
creator,
creator.user,
room_id,
"join",
- ratelimit=ratelimit,
+ ratelimit=False,
content=creator_join_profile,
new_room=True,
prev_event_ids=[last_sent_event_id],
@@ -1159,15 +1152,24 @@ async def send(
depth += 1
state_map[(EventTypes.Member, creator.user.to_string())] = member_event_id
+ # we need the state group of the membership event as it is the current state group
+ event_to_state = (
+ await self._storage_controllers.state.get_state_group_for_events(
+ [member_event_id]
+ )
+ )
+ current_state_group = event_to_state[member_event_id]
+
+ events_to_send = []
# We treat the power levels override specially as this needs to be one
# of the first events that get sent into a room.
pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None)
if pl_content is not None:
power_event, power_context = await create_event(
- EventTypes.PowerLevels, pl_content, False
+ EventTypes.PowerLevels, pl_content, True
)
current_state_group = power_context._state_group
- await send(power_event, power_context, creator)
+ events_to_send.append((power_event, power_context))
else:
power_level_content: JsonDict = {
"users": {creator_id: 100},
@@ -1213,12 +1215,11 @@ async def send(
pl_event, pl_context = await create_event(
EventTypes.PowerLevels,
power_level_content,
- False,
+ True,
)
current_state_group = pl_context._state_group
- await send(pl_event, pl_context, creator)
+ events_to_send.append((pl_event, pl_context))
- events_to_send = []
if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
room_alias_event, room_alias_context = await create_event(
EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True
@@ -1271,7 +1272,10 @@ async def send(
events_to_send.append((encryption_event, encryption_context))
last_event = await self.event_creation_handler.handle_new_client_event(
- creator, events_to_send, ignore_shadow_ban=True
+ creator,
+ events_to_send,
+ ignore_shadow_ban=True,
+ ratelimit=False,
)
assert last_event.internal_metadata.stream_ordering is not None
return last_event.internal_metadata.stream_ordering, last_event.event_id, depth
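
The refactor above replaces the per-event `send` helper with a single batched persist. Schematically, with simplified stand-in types rather than the real signatures:

```python
# Schematic of the batching refactor: (event, context) pairs are accumulated
# and persisted with one handle_new_client_event call instead of one call per
# event. The callables and types are simplified stand-ins.
from typing import Any, Awaitable, Callable, List, Tuple

EventAndContext = Tuple[Any, Any]

async def send_initial_state(
    create_event: Callable[[str, dict, bool], Awaitable[EventAndContext]],
    handle_new_client_event: Callable[[List[EventAndContext]], Awaitable[Any]],
    specs: List[Tuple[str, dict]],
) -> Any:
    events_to_send: List[EventAndContext] = []
    for etype, content in specs:
        # for_batch=True builds the context from the tracked state map
        # rather than persisting the event immediately.
        event, context = await create_event(etype, content, True)
        events_to_send.append((event, context))
    # One round trip persists (and notifies for) the whole batch.
    return await handle_new_client_event(events_to_send)
```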
@@ -1447,7 +1451,7 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]:
events_before=events_before,
event=event,
events_after=events_after,
- state=await filter_evts(state_events),
+ state=state_events,
aggregations=aggregations,
start=await token.copy_and_replace(
StreamKeyType.ROOM, results.start
@@ -1493,7 +1497,12 @@ async def get_event_for_timestamp(
Raises:
SynapseError if unable to find any event locally in the given direction
"""
-
+ logger.debug(
+ "get_event_for_timestamp(room_id=%s, timestamp=%s, direction=%s) Finding closest event...",
+ room_id,
+ timestamp,
+ direction,
+ )
local_event_id = await self.store.get_event_id_for_timestamp(
room_id, timestamp, direction
)
@@ -1545,85 +1554,54 @@ async def get_event_for_timestamp(
)
)
- # Loop through each homeserver candidate until we get a succesful response
- for domain in likely_domains:
- # We don't want to ask our own server for information we don't have
- if domain == self.server_name:
- continue
+ remote_response = await self.federation_client.timestamp_to_event(
+ destinations=likely_domains,
+ room_id=room_id,
+ timestamp=timestamp,
+ direction=direction,
+ )
+ if remote_response is not None:
+ logger.debug(
+ "get_event_for_timestamp: remote_response=%s",
+ remote_response,
+ )
- try:
- remote_response = await self.federation_client.timestamp_to_event(
- domain, room_id, timestamp, direction
- )
- logger.debug(
- "get_event_for_timestamp: response from domain(%s)=%s",
- domain,
- remote_response,
- )
+ remote_event_id = remote_response.event_id
+ remote_origin_server_ts = remote_response.origin_server_ts
- remote_event_id = remote_response.event_id
- remote_origin_server_ts = remote_response.origin_server_ts
-
- # Backfill this event so we can get a pagination token for
- # it with `/context` and paginate `/messages` from this
- # point.
- #
- # TODO: The requested timestamp may lie in a part of the
- # event graph that the remote server *also* didn't have,
- # in which case they will have returned another event
- # which may be nowhere near the requested timestamp. In
- # the future, we may need to reconcile that gap and ask
- # other homeservers, and/or extend `/timestamp_to_event`
- # to return events on *both* sides of the timestamp to
- # help reconcile the gap faster.
- remote_event = (
- await self.federation_event_handler.backfill_event_id(
- domain, room_id, remote_event_id
- )
- )
+ # Backfill this event so we can get a pagination token for
+ # it with `/context` and paginate `/messages` from this
+ # point.
+ pulled_pdu_info = await self.federation_event_handler.backfill_event_id(
+ likely_domains, room_id, remote_event_id
+ )
+ remote_event = pulled_pdu_info.pdu
- # XXX: When we see that the remote server is not trustworthy,
- # maybe we should not ask them first in the future.
- if remote_origin_server_ts != remote_event.origin_server_ts:
- logger.info(
- "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.",
- domain,
- remote_event_id,
- remote_origin_server_ts,
- remote_event.origin_server_ts,
- )
-
- # Only return the remote event if it's closer than the local event
- if not local_event or (
- abs(remote_event.origin_server_ts - timestamp)
- < abs(local_event.origin_server_ts - timestamp)
- ):
- logger.info(
- "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
- remote_event_id,
- remote_event.origin_server_ts,
- timestamp,
- local_event.event_id if local_event else None,
- local_event.origin_server_ts if local_event else None,
- )
- return remote_event_id, remote_origin_server_ts
- except (HttpResponseException, InvalidResponseError) as ex:
- # Let's not put a high priority on some other homeserver
- # failing to respond or giving a random response
- logger.debug(
- "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s",
- domain,
- type(ex).__name__,
- ex,
- ex.args,
+ # XXX: When we see that the remote server is not trustworthy,
+ # maybe we should not ask them first in the future.
+ if remote_origin_server_ts != remote_event.origin_server_ts:
+ logger.info(
+ "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.",
+ pulled_pdu_info.pull_origin,
+ remote_event_id,
+ remote_origin_server_ts,
+ remote_event.origin_server_ts,
)
- except Exception:
- # But we do want to see some exceptions in our code
- logger.warning(
- "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception",
- domain,
- exc_info=True,
+
+ # Only return the remote event if it's closer than the local event
+ if not local_event or (
+ abs(remote_event.origin_server_ts - timestamp)
+ < abs(local_event.origin_server_ts - timestamp)
+ ):
+ logger.info(
+ "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)",
+ remote_event_id,
+ remote_event.origin_server_ts,
+ timestamp,
+ local_event.event_id if local_event else None,
+ local_event.origin_server_ts if local_event else None,
)
+ return remote_event_id, remote_origin_server_ts
# To appease mypy, we have to add both of these conditions to check for
# `None`. We only expect `local_event` to be `None` when
@@ -1646,7 +1624,7 @@ async def get_new_events(
self,
user: UserID,
from_key: RoomStreamToken,
- limit: Optional[int],
+ limit: int,
room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 6ad2b38b8f96..0c39e852a12e 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -34,7 +34,6 @@
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.logging import opentracing
from synapse.module_api import NOT_SPAM
-from synapse.storage.state import StateFilter
from synapse.types import (
JsonDict,
Requester,
@@ -45,6 +44,7 @@
create_requester,
get_domain_from_id,
)
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room
diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py
index 9602f0d0bb48..874860d461e0 100644
--- a/synapse/handlers/saml.py
+++ b/synapse/handlers/saml.py
@@ -441,7 +441,7 @@ def saml_response_to_user_attributes(
client_redirect_url: where the client wants to redirect to
Returns:
- dict: A dict containing new user attributes. Possible keys:
+ A dict containing new user attributes. Possible keys:
* mxid_localpart (str): Required. The localpart of the user's mxid
* displayname (str): The displayname of the user
* emails (list[str]): Any emails for the user
@@ -483,7 +483,7 @@ def parse_config(config: dict) -> SamlConfig:
Args:
config: A dictionary containing configuration options for this provider
Returns:
- SamlConfig: A custom config object for this module
+ A custom config object for this module
"""
# Parse config options and use defaults where necessary
mxid_source_attribute = config.get("mxid_source_attribute", "uid")
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index bcab98c6d547..33115ce488ca 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -23,8 +23,8 @@
from synapse.api.errors import NotFoundError, SynapseError
from synapse.api.filtering import Filter
from synapse.events import EventBase
-from synapse.storage.state import StateFilter
from synapse.types import JsonDict, StreamKeyType, UserID
+from synapse.types.state import StateFilter
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
index 73861bbd4085..bd9d0bb34b1a 100644
--- a/synapse/handlers/set_password.py
+++ b/synapse/handlers/set_password.py
@@ -15,6 +15,7 @@
from typing import TYPE_CHECKING, Optional
from synapse.api.errors import Codes, StoreError, SynapseError
+from synapse.handlers.device import DeviceHandler
from synapse.types import Requester
if TYPE_CHECKING:
@@ -29,7 +30,10 @@ class SetPasswordHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._auth_handler = hs.get_auth_handler()
- self._device_handler = hs.get_device_handler()
+ # This can only be instantiated on the main process.
+ device_handler = hs.get_device_handler()
+ assert isinstance(device_handler, DeviceHandler)
+ self._device_handler = device_handler
async def set_password(
self,
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index e035677b8a10..44e70fc4b874 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
+import hashlib
+import io
import logging
from typing import (
TYPE_CHECKING,
@@ -37,6 +39,7 @@
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, NotFoundError, RedirectException, SynapseError
from synapse.config.sso import SsoAttributeRequirement
+from synapse.handlers.device import DeviceHandler
from synapse.handlers.register import init_counters_for_auth_provider
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
from synapse.http import get_request_user_agent
@@ -137,6 +140,7 @@ class UserAttributes:
localpart: Optional[str]
confirm_localpart: bool = False
display_name: Optional[str] = None
+ picture: Optional[str] = None
emails: Collection[str] = attr.Factory(list)
@@ -191,9 +195,14 @@ def __init__(self, hs: "HomeServer"):
self._server_name = hs.hostname
self._registration_handler = hs.get_registration_handler()
self._auth_handler = hs.get_auth_handler()
+ self._device_handler = hs.get_device_handler()
self._error_template = hs.config.sso.sso_error_template
self._bad_user_template = hs.config.sso.sso_auth_bad_user_template
self._profile_handler = hs.get_profile_handler()
+ self._media_repo = (
+ hs.get_media_repository() if hs.config.media.can_load_media_repo else None
+ )
+ self._http_client = hs.get_proxied_blacklisted_http_client()
# The following template is shown after a successful user interactive
# authentication session. It tells the user they can close the window.
@@ -493,6 +502,8 @@ async def complete_sso_login_request(
await self._profile_handler.set_displayname(
user_id_obj, requester, attributes.display_name, True
)
+ if attributes.picture:
+ await self.set_avatar(user_id, attributes.picture)
await self._auth_handler.complete_sso_login(
user_id,
@@ -701,8 +712,110 @@ async def _register_mapped_user(
await self._store.record_user_external_id(
auth_provider_id, remote_user_id, registered_user_id
)
+
+ # Set avatar, if available
+ if attributes.picture:
+ await self.set_avatar(registered_user_id, attributes.picture)
+
return registered_user_id
+ async def set_avatar(self, user_id: str, picture_https_url: str) -> bool:
+ """Set avatar of the user.
+
+ This downloads the image file from the URL provided, stores that in
+ the media repository and then sets the avatar on the user's profile.
+
+ It detects when the same image is being saved again and bails out early, by
+ storing a hash of the file in the `upload_name` of the avatar image.
+
+ Currently, it only supports server configurations which run the media repository
+ within the same process.
+
+ It fails silently (catching the exception internally and logging a warning) if:
+ * it is unable to fetch the image (non-200 status code), or
+ * the image is bigger than the maximum allowed size, or
+ * the image type is not one of the allowed image types.
+
+ Args:
+ user_id: matrix user ID in the form @localpart:domain as a string.
+
+ picture_https_url: HTTPS url for the picture image file.
+
+ Returns: `True` if the user's avatar has been successfully set to the image at
+ `picture_https_url`.
+ """
+ if self._media_repo is None:
+ logger.info(
+ "failed to set user avatar because out-of-process media repositories "
+ "are not supported yet "
+ )
+ return False
+
+ try:
+ uid = UserID.from_string(user_id)
+
+ def is_allowed_mime_type(content_type: str) -> bool:
+ if (
+ self._profile_handler.allowed_avatar_mimetypes
+ and content_type
+ not in self._profile_handler.allowed_avatar_mimetypes
+ ):
+ return False
+ return True
+
+ # download picture, enforcing size limit & mime type check
+ picture = io.BytesIO()
+
+ content_length, headers, uri, code = await self._http_client.get_file(
+ url=picture_https_url,
+ output_stream=picture,
+ max_size=self._profile_handler.max_avatar_size,
+ is_allowed_content_type=is_allowed_mime_type,
+ )
+
+ if code != 200:
+ raise Exception(
+ "GET request to download sso avatar image returned {}".format(code)
+ )
+
+ # Rewind the stream first: `get_file` leaves the position at the end of
+ # the downloaded data, so hashing without seeking would hash zero bytes.
+ picture.seek(0)
+
+ # upload name includes a hash of the image file's content so that we can
+ # easily check whether it needs an update the next time the user logs in
+ upload_name = "sso_avatar_" + hashlib.sha256(picture.read()).hexdigest()
+
+ # bail if user already has the same avatar
+ profile = await self._profile_handler.get_profile(user_id)
+ if profile["avatar_url"] is not None:
+ server_name = profile["avatar_url"].split("/")[-2]
+ media_id = profile["avatar_url"].split("/")[-1]
+ if server_name == self._server_name:
+ media = await self._media_repo.store.get_local_media(media_id)
+ if media is not None and upload_name == media["upload_name"]:
+ logger.info("skipping saving the user avatar")
+ return True
+
+ # store it in the media repository, rewinding the stream again since
+ # hashing consumed it
+ picture.seek(0)
+ avatar_mxc_url = await self._media_repo.create_content(
+ media_type=headers[b"Content-Type"][0].decode("utf-8"),
+ upload_name=upload_name,
+ content=picture,
+ content_length=content_length,
+ auth_user=uid,
+ )
+
+ # save it as user avatar
+ await self._profile_handler.set_avatar_url(
+ uid,
+ create_requester(uid),
+ str(avatar_mxc_url),
+ )
+
+ logger.info("successfully saved the user avatar")
+ return True
+ except Exception:
+ logger.warning("failed to save the user avatar")
+ return False
+
async def complete_sso_ui_auth_request(
self,
auth_provider_id: str,
@@ -874,7 +987,7 @@ async def handle_submit_username_request(
)
async def handle_terms_accepted(
- self, request: Request, session_id: str, terms_version: str
+ self, request: SynapseRequest, session_id: str, terms_version: str
) -> None:
"""Handle a request to the new-user 'consent' endpoint
@@ -1026,6 +1139,84 @@ def check_required_attributes(
return True
+ async def revoke_sessions_for_provider_session_id(
+ self,
+ auth_provider_id: str,
+ auth_provider_session_id: str,
+ expected_user_id: Optional[str] = None,
+ ) -> None:
+ """Revoke any devices and in-flight logins tied to a provider session.
+
+ Can only be called from the main process.
+
+ Args:
+ auth_provider_id: A unique identifier for this SSO provider, e.g.
+ "oidc" or "saml".
+ auth_provider_session_id: The provider's session ID to log out
+ expected_user_id: The user we're expecting to log out. If set, sessions
+ belonging to other users are ignored and an error is logged.
+ """
+
+ # It is expected that this is the main process.
+ assert isinstance(
+ self._device_handler, DeviceHandler
+ ), "revoking SSO sessions can only be called on the main process"
+
+ # Invalidate any running user-mapping sessions
+ to_delete = []
+ for session_id, session in self._username_mapping_sessions.items():
+ if (
+ session.auth_provider_id == auth_provider_id
+ and session.auth_provider_session_id == auth_provider_session_id
+ ):
+ to_delete.append(session_id)
+
+ for session_id in to_delete:
+ logger.info("Revoking mapping session %s", session_id)
+ del self._username_mapping_sessions[session_id]
+
+ # Invalidate any in-flight login tokens
+ await self._store.invalidate_login_tokens_by_session_id(
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+
+ # Fetch any device(s) in the store associated with the session ID.
+ devices = await self._store.get_devices_by_auth_provider_session_id(
+ auth_provider_id=auth_provider_id,
+ auth_provider_session_id=auth_provider_session_id,
+ )
+
+ # We have no guarantee that all the devices of that session are for the same
+ # `user_id`. Hence, we have to iterate over the list of devices and log them out
+ # one by one.
+ for device in devices:
+ user_id = device["user_id"]
+ device_id = device["device_id"]
+
+ # If the user_id associated with that device/session is not the one we got
+ # out of the `sub` claim, skip that device and log an error.
+ if expected_user_id is not None and user_id != expected_user_id:
+ logger.error(
+ "Received a logout notification from SSO provider "
+ f"{auth_provider_id!r} for the user {expected_user_id!r}, but with "
+ f"a session ID ({auth_provider_session_id!r}) which belongs to "
+ f"{user_id!r}. This may happen when the SSO provider user mapper "
+ "uses something else than the standard attribute as mapping ID. "
+ "For OIDC providers, set `backchannel_logout_ignore_sub` to `true` "
+ "in the provider config if that is the case."
+ )
+ continue
+
+ logger.info(
+ "Logging out %r (device %r) via SSO (%r) logout notification (session %r).",
+ user_id,
+ device_id,
+ auth_provider_id,
+ auth_provider_session_id,
+ )
+ await self._device_handler.delete_devices(user_id, [device_id])
+
def get_username_mapping_session_cookie_from_request(request: IRequest) -> str:
"""Extract the session ID from the cookie
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 1db5d6802106..7d6a653747b6 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -31,18 +31,24 @@
import attr
from prometheus_client import Counter
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.filtering import FilterCollection
from synapse.api.presence import UserPresenceState
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.handlers.relations import BundledAggregations
from synapse.logging.context import current_context
-from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span
+from synapse.logging.opentracing import (
+ SynapseTags,
+ log_kv,
+ set_tag,
+ start_active_span,
+ trace,
+)
from synapse.push.clientformat import format_push_rules_for_user
from synapse.storage.databases.main.event_push_actions import RoomNotifCounts
+from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
-from synapse.storage.state import StateFilter
from synapse.types import (
DeviceListUpdates,
JsonDict,
@@ -54,6 +60,7 @@
StreamToken,
UserID,
)
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.lrucache import LruCache
@@ -805,18 +812,6 @@ async def compute_summary(
if canonical_alias and canonical_alias.content.get("alias"):
return summary
- me = sync_config.user.to_string()
-
- joined_user_ids = [
- r[0] for r in details.get(Membership.JOIN, empty_ms).members if r[0] != me
- ]
- invited_user_ids = [
- r[0] for r in details.get(Membership.INVITE, empty_ms).members if r[0] != me
- ]
- gone_user_ids = [
- r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me
- ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me]
-
# FIXME: only build up a member_ids list for our heroes
member_ids = {}
for membership in (
@@ -828,11 +823,8 @@ async def compute_summary(
for user_id, event_id in details.get(membership, empty_ms).members:
member_ids[user_id] = event_id
- # FIXME: order by stream ordering rather than as returned by SQL
- if joined_user_ids or invited_user_ids:
- summary["m.heroes"] = sorted(joined_user_ids + invited_user_ids)[0:5]
- else:
- summary["m.heroes"] = sorted(gone_user_ids)[0:5]
+ me = sync_config.user.to_string()
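+ # `extract_heroes_from_room_summary` reproduces the removed logic above:
+ # prefer joined/invited members (excluding ourselves), falling back to
+ # left/banned ones, capped at 5 heroes.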
+ summary["m.heroes"] = extract_heroes_from_room_summary(details, me)
if not sync_config.filter_collection.lazy_load_members():
return summary
@@ -1440,14 +1432,14 @@ async def generate_sync_result(
logger.debug("Fetching OTK data")
device_id = sync_config.device_id
- one_time_key_counts: JsonDict = {}
+ one_time_keys_count: JsonDict = {}
unused_fallback_key_types: List[str] = []
if device_id:
# TODO: We should have a way to let clients differentiate between the states of:
# * no change in OTK count since the provided since token
# * the server has zero OTKs left for this device
# Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
- one_time_key_counts = await self.store.count_e2e_one_time_keys(
+ one_time_keys_count = await self.store.count_e2e_one_time_keys(
user_id, device_id
)
unused_fallback_key_types = (
@@ -1477,7 +1469,7 @@ async def generate_sync_result(
archived=sync_result_builder.archived,
to_device=sync_result_builder.to_device,
device_lists=device_lists,
- device_one_time_keys_count=one_time_key_counts,
+ device_one_time_keys_count=one_time_keys_count,
device_unused_fallback_key_types=unused_fallback_key_types,
next_batch=sync_result_builder.now_token,
)
@@ -1542,10 +1534,12 @@ async def _generate_sync_entry_for_device_list(
#
# If we don't have that info cached then we get all the users that
# share a room with our user and check if those users have changed.
- changed_users = self.store.get_cached_device_list_changes(
+ cache_result = self.store.get_cached_device_list_changes(
since_token.device_list_key
)
- if changed_users is not None:
+ if cache_result.hit:
+ changed_users = cache_result.entities
+
result = await self.store.get_rooms_for_users(changed_users)
for changed_user_id, entries in result.items():
@@ -1598,6 +1592,7 @@ async def _generate_sync_entry_for_device_list(
else:
return DeviceListUpdates()
+ @trace
async def _generate_sync_entry_for_to_device(
self, sync_result_builder: "SyncResultBuilder"
) -> None:
@@ -1617,11 +1612,16 @@ async def _generate_sync_entry_for_to_device(
)
for message in messages:
- # We pop here as we shouldn't be sending the message ID down
- # `/sync`
- message_id = message.pop("message_id", None)
- if message_id:
- set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)
+ log_kv(
+ {
+ "event": "to_device_message",
+ "sender": message["sender"],
+ "type": message["type"],
+ EventContentFields.TO_DEVICE_MSGID: message["content"].get(
+ EventContentFields.TO_DEVICE_MSGID
+ ),
+ }
+ )
logger.debug(
"Returning %d to-device messages between %d and %d (current token: %d)",
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index f95369166985..3f656ea4f508 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -420,11 +420,11 @@ async def get_all_typing_updates(
if last_id == current_id:
return [], current_id, False
- changed_rooms: Optional[
- Iterable[str]
- ] = self._typing_stream_change_cache.get_all_entities_changed(last_id)
+ result = self._typing_stream_change_cache.get_all_entities_changed(last_id)
- if changed_rooms is None:
+ if result.hit:
+ changed_rooms: Iterable[str] = result.entities
+ else:
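+ # Cache miss: be conservative and treat every room we track as
+ # potentially changed.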
changed_rooms = self._room_serials
rows = []
@@ -513,7 +513,7 @@ async def get_new_events(
self,
user: UserID,
from_key: int,
- limit: Optional[int],
+ limit: int,
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py
index 6a9f6635d2c0..8729630581b5 100644
--- a/synapse/http/additional_resource.py
+++ b/synapse/http/additional_resource.py
@@ -45,8 +45,7 @@ def __init__(
Args:
hs: homeserver
- handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred):
- function to be called to handle the request.
+ handler: function to be called to handle the request.
"""
super().__init__()
self._handler = handler
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 084d0a5b84e9..4eb740c04020 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -25,7 +25,6 @@
List,
Mapping,
Optional,
- Sequence,
Tuple,
Union,
)
@@ -90,14 +89,29 @@
"synapse_http_client_responses", "", ["method", "code"]
)
-# the type of the headers list, to be passed to the t.w.h.Headers.
-# Actually we can mix str and bytes keys, but Mapping treats 'key' as invariant so
-# we simplify.
+# the type of the headers map, to be passed to the t.w.h.Headers.
+#
+# The actual type accepted by Twisted is
+#    Mapping[Union[str, bytes], Sequence[Union[str, bytes]]],
+# allowing us to mix and match str and bytes freely. However: any str is also a
+# Sequence[str]; passing a header value which is a standalone str is
+# interpreted as a sequence of 1-codepoint strings. This is a disastrous footgun.
+# We use a narrower value type (RawHeaderValue) to avoid this footgun.
+#
+# We also simplify the keys to be either all str or all bytes. This helps because
+# Dict[K, V] is invariant in K (and indeed V).
RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]]
# the value actually has to be a List, but List is invariant so we can't specify that
# the entries can either be Lists or bytes.
-RawHeaderValue = Sequence[Union[str, bytes]]
+RawHeaderValue = Union[
+ List[str],
+ List[bytes],
+ List[Union[str, bytes]],
+ Tuple[str, ...],
+ Tuple[bytes, ...],
+ Tuple[Union[str, bytes], ...],
+]
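+# For example (illustrative only): passing {"User-Agent": "some-agent"} would
+# send each character of "some-agent" as a separate header value, whereas
+# {"User-Agent": ["some-agent"]} sends the single intended value.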
def check_against_blacklist(
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 2f0177f1e203..0359231e7dd3 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -155,11 +155,10 @@ def request(
a file for a file upload). Or None if the request is to have
no body.
Returns:
- Deferred[twisted.web.iweb.IResponse]:
- fires when the header of the response has been received (regardless of the
- response status code). Fails if there is any problem which prevents that
- response from being received (including problems that prevent the request
- from being sent).
+ A deferred which fires when the header of the response has been received
+ (regardless of the response status code). Fails if there is any problem
+ which prevents that response from being received (including problems that
+ prevent the request from being sent).
"""
# We use urlparse as that will set `port` to None if there is no
# explicit port.
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 3c35b1d2c7af..b92f1d3d1af5 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -951,8 +951,7 @@ async def post_json(
args: query params
Returns:
- dict|list: Succeeds when we get a 2xx HTTP response. The
- result will be the decoded JSON body.
+ Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body.
Raises:
HttpResponseException: If we get an HTTP response code >= 300
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
index 1f8227896f65..18899bc6d18d 100644
--- a/synapse/http/proxyagent.py
+++ b/synapse/http/proxyagent.py
@@ -34,7 +34,7 @@
)
from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
-from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS
+from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse
from synapse.http import redact_uri
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
@@ -134,7 +134,7 @@ def request(
uri: bytes,
headers: Optional[Headers] = None,
bodyProducer: Optional[IBodyProducer] = None,
- ) -> defer.Deferred:
+ ) -> "defer.Deferred[IResponse]":
"""
Issue a request to the server indicated by the given uri.
@@ -157,17 +157,17 @@ def request(
a file upload). Or, None if the request is to have no body.
Returns:
- Deferred[IResponse]: completes when the header of the response has
- been received (regardless of the response status code).
+ A deferred which completes when the header of the response has
+ been received (regardless of the response status code).
- Can fail with:
- SchemeNotSupported: if the uri is not http or https
+ Can fail with:
+ SchemeNotSupported: if the uri is not http or https
- twisted.internet.error.TimeoutError if the server we are connecting
- to (proxy or destination) does not accept a connection before
- connectTimeout.
+ twisted.internet.error.TimeoutError if the server we are connecting
+ to (proxy or destination) does not accept a connection before
+ connectTimeout.
- ... other things too.
+ ... other things too.
"""
uri = uri.strip()
if not _VALID_URI.match(uri):
diff --git a/synapse/http/server.py b/synapse/http/server.py
index bcbfac2c9fff..2563858f3cdf 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -19,6 +19,7 @@
import types
import urllib
from http import HTTPStatus
+from http.client import FOUND
from inspect import isawaitable
from typing import (
TYPE_CHECKING,
@@ -266,7 +267,7 @@ def register_paths(
request. The first argument will be the request object and
subsequent arguments will be any matched groups from the regex.
This should return either tuple of (code, response), or None.
- servlet_classname (str): The name of the handler to be used in prometheus
+ servlet_classname: The name of the handler to be used in prometheus
and opentracing logs.
"""
@@ -339,7 +340,7 @@ async def _async_render(self, request: SynapseRequest) -> Optional[Tuple[int, An
return callback_return
- _unrecognised_request_handler(request)
+ return _unrecognised_request_handler(request)
@abc.abstractmethod
def _send_response(
@@ -576,7 +577,24 @@ def _unrecognised_request_handler(request: Request) -> NoReturn:
Args:
request: Unused, but passed in to match the signature of ServletCallback.
"""
- raise UnrecognizedRequestError()
+ raise UnrecognizedRequestError(code=404)
+
+
+class UnrecognizedRequestResource(resource.Resource):
+ """
+ Similar to twisted.web.resource.NoResource, but returns a JSON 404 with an
+ errcode of M_UNRECOGNIZED.
+ """
+
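+ # The rendered JSON body looks like (illustrative):
+ #   {"errcode": "M_UNRECOGNIZED", "error": "Unrecognized request"}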
+ def render(self, request: SynapseRequest) -> int:
+ f = failure.Failure(UnrecognizedRequestError(code=404))
+ return_json_error(f, request, None)
+ # A response has already been sent but Twisted requires either NOT_DONE_YET
+ # or the response bytes as a return value.
+ return NOT_DONE_YET
+
+ def getChild(self, name: str, request: Request) -> resource.Resource:
+ return self
class RootRedirect(resource.Resource):
@@ -598,7 +616,7 @@ def getChild(self, name: str, request: Request) -> resource.Resource:
class OptionsResource(resource.Resource):
"""Responds to OPTION requests for itself and all children."""
- def render_OPTIONS(self, request: Request) -> bytes:
+ def render_OPTIONS(self, request: SynapseRequest) -> bytes:
request.setResponseCode(204)
request.setHeader(b"Content-Length", b"0")
@@ -763,7 +781,7 @@ def respond_with_json(
def respond_with_json_bytes(
- request: Request,
+ request: SynapseRequest,
code: int,
json_bytes: bytes,
send_cors: bool = False,
@@ -859,7 +877,7 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
_ByteProducer(request, bytes_generator)
-def set_cors_headers(request: Request) -> None:
+def set_cors_headers(request: SynapseRequest) -> None:
"""Set the CORS headers so that javascript running in a web browsers can
use this API
@@ -870,10 +888,20 @@ def set_cors_headers(request: Request) -> None:
request.setHeader(
b"Access-Control-Allow-Methods", b"GET, HEAD, POST, PUT, DELETE, OPTIONS"
)
- request.setHeader(
- b"Access-Control-Allow-Headers",
- b"X-Requested-With, Content-Type, Authorization, Date",
- )
+ if request.experimental_cors_msc3886:
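+ # MSC3886 (simple client rendezvous) clients make conditional requests
+ # with If-Match/If-None-Match and need to read the ETag and Location
+ # headers cross-origin, hence the extra allowed and exposed headers.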
+ request.setHeader(
+ b"Access-Control-Allow-Headers",
+ b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match",
+ )
+ request.setHeader(
+ b"Access-Control-Expose-Headers",
+ b"ETag, Location, X-Max-Bytes",
+ )
+ else:
+ request.setHeader(
+ b"Access-Control-Allow-Headers",
+ b"X-Requested-With, Content-Type, Authorization, Date",
+ )
def set_corp_headers(request: Request) -> None:
@@ -942,10 +970,25 @@ def set_clickjacking_protection_headers(request: Request) -> None:
request.setHeader(b"Content-Security-Policy", b"frame-ancestors 'none';")
-def respond_with_redirect(request: Request, url: bytes) -> None:
- """Write a 302 response to the request, if it is still alive."""
+def respond_with_redirect(
+ request: SynapseRequest, url: bytes, statusCode: int = FOUND, cors: bool = False
+) -> None:
+ """
+ Write a 302 (or other specified status code) response to the request, if it is still alive.
+
+ Args:
+ request: The http request to respond to.
+ url: The URL to redirect to.
+ statusCode: The HTTP status code to use for the redirect (defaults to 302).
+ cors: Whether to set CORS headers on the response.
+ """
logger.debug("Redirect to %s", url.decode("utf-8"))
- request.redirect(url)
+
+ if cors:
+ set_cors_headers(request)
+
+ request.setResponseCode(statusCode)
+ request.setHeader(b"location", url)
finish_request(request)
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 80acbdcf3ce7..dead02cd5c4f 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -35,6 +35,7 @@
from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError
+from synapse.http import redact_uri
from synapse.http.server import HttpServer
from synapse.types import JsonDict, RoomAlias, RoomID
from synapse.util import json_decoder
@@ -664,7 +665,13 @@ def parse_json_value_from_request(
try:
content = json_decoder.decode(content_bytes.decode("utf-8"))
except Exception as e:
- logger.warning("Unable to parse JSON: %s (%s)", e, content_bytes)
+ logger.warning(
+ "Unable to parse JSON from %s %s response: %s (%s)",
+ request.method.decode("ascii", errors="replace"),
+ redact_uri(request.uri.decode("ascii", errors="replace")),
+ e,
+ content_bytes,
+ )
raise SynapseError(
HTTPStatus.BAD_REQUEST, "Content not JSON.", errcode=Codes.NOT_JSON
)
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 55a6afce3552..6a1dbf7f33b6 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -82,6 +82,7 @@ def __init__(
self.reactor = site.reactor
self._channel = channel # this is used by the tests
self.start_time = 0.0
+ self.experimental_cors_msc3886 = site.experimental_cors_msc3886
# The requester, if authenticated. For federation requests this is the
# server name, for client requests this is the Requester object.
@@ -399,7 +400,7 @@ def _started_processing(self, servlet_name: str) -> None:
be sure to call finished_processing.
Args:
- servlet_name (str): the name of the servlet which will be
+ servlet_name: the name of the servlet which will be
processing this request. This is used in the metrics.
It is possible to update this afterwards by updating
@@ -622,6 +623,8 @@ def __init__(
request_id_header = config.http_options.request_id_header
+ self.experimental_cors_msc3886 = config.http_options.experimental_cors_msc3886
+
def request_factory(channel: HTTPChannel, queued: bool) -> Request:
return request_class(
channel,
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 6a08ffed646f..f62bea968fe4 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -117,8 +117,7 @@ def __init__(self, copy_from: "Optional[ContextResourceUsage]" = None) -> None:
"""Create a new ContextResourceUsage
Args:
- copy_from (ContextResourceUsage|None): if not None, an object to
- copy stats from
+ copy_from: if not None, an object to copy stats from
"""
if copy_from is None:
self.reset()
@@ -162,7 +161,7 @@ def __iadd__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
"""Add another ContextResourceUsage's stats to this one's.
Args:
- other (ContextResourceUsage): the other resource usage object
+ other: the other resource usage object
"""
self.ru_utime += other.ru_utime
self.ru_stime += other.ru_stime
@@ -342,7 +341,7 @@ def current_context(cls) -> LoggingContextOrSentinel:
called directly.
Returns:
- LoggingContext: the current logging context
+ The current logging context
"""
warnings.warn(
"synapse.logging.context.LoggingContext.current_context() is deprecated "
@@ -362,7 +361,8 @@ def set_current_context(
called directly.
Args:
- context(LoggingContext): The context to activate.
+ context: The context to activate.
+
Returns:
The context that was previously active
"""
@@ -474,8 +474,7 @@ def get_resource_usage(self) -> ContextResourceUsage:
"""Get resources used by this logcontext so far.
Returns:
- ContextResourceUsage: a *copy* of the object tracking resource
- usage so far
+ A *copy* of the object tracking resource usage so far
"""
# we always return a copy, for consistency
res = self._resource_usage.copy()
@@ -663,7 +662,8 @@ def current_context() -> LoggingContextOrSentinel:
def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
"""Set the current logging context in thread local storage
Args:
- context(LoggingContext): The context to activate.
+ context: The context to activate.
+
Returns:
The context that was previously active
"""
@@ -700,7 +700,7 @@ def nested_logging_context(suffix: str) -> LoggingContext:
suffix: suffix to add to the parent context's 'name'.
Returns:
- LoggingContext: new logging context.
+ A new logging context.
"""
curr_context = current_context()
if not curr_context:
@@ -898,20 +898,19 @@ def defer_to_thread(
on it.
Args:
- reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
- the Deferred will be invoked, and whose threadpool we should use for the
- function.
+ reactor: The reactor in whose main thread the Deferred will be invoked,
+ and whose threadpool we should use for the function.
Normally this will be hs.get_reactor().
- f (callable): The function to call.
+ f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
- Deferred: A Deferred which fires a callback with the result of `f`, or an
+ A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
@@ -939,20 +938,20 @@ def defer_to_threadpool(
on it.
Args:
- reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
- the Deferred will be invoked. Normally this will be hs.get_reactor().
+ reactor: The reactor in whose main thread the Deferred will be invoked.
+ Normally this will be hs.get_reactor().
- threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
- running `f`. Normally this will be hs.get_reactor().getThreadPool().
+ threadpool: The threadpool to use for running `f`. Normally this will be
+ hs.get_reactor().getThreadPool().
- f (callable): The function to call.
+ f: The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
- Deferred: A Deferred which fires a callback with the result of `f`, or an
+ A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
curr_context = current_context()
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 8ce5a2a33818..a705af83565d 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -292,8 +292,15 @@ def report_span(self, span: "opentracing.Span") -> None:
class SynapseTags:
- # The message ID of any to_device message processed
- TO_DEVICE_MESSAGE_ID = "to_device.message_id"
+ # The message ID of any to_device EDU processed
+ TO_DEVICE_EDU_ID = "to_device.edu_id"
+
+ # Details about to-device messages
+ TO_DEVICE_TYPE = "to_device.type"
+ TO_DEVICE_SENDER = "to_device.sender"
+ TO_DEVICE_RECIPIENT = "to_device.recipient"
+ TO_DEVICE_RECIPIENT_DEVICE = "to_device.recipient_device"
+ TO_DEVICE_MSGID = "to_device.msgid" # client-generated ID
# Whether the sync response has new data to be returned to the client.
SYNC_RESULT = "sync.new_data"
@@ -721,7 +728,7 @@ def inject_header_dict(
destination: address of entity receiving the span context. Must be given unless
check_destination is False. The context will only be injected if the
destination matches the opentracing whitelist
- check_destination (bool): If false, destination will be ignored and the context
+ check_destination: If false, destination will be ignored and the context
will always be injected.
Note:
@@ -780,7 +787,7 @@ def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str
destination: the name of the remote server.
Returns:
- dict: the active span's context if opentracing is enabled, otherwise empty.
+ the active span's context if opentracing is enabled, otherwise empty.
"""
if destination and not whitelisted_homeserver(destination):
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index c3d3daf8774c..b01372565d14 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -47,11 +47,7 @@
# This module is imported for its side effects; flake8 needn't warn that it's unused.
import synapse.metrics._reactor_metrics # noqa: F401
from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
-from synapse.metrics._legacy_exposition import (
- MetricsResource,
- generate_latest,
- start_http_server,
-)
+from synapse.metrics._twisted_exposition import MetricsResource, generate_latest
from synapse.metrics._types import Collector
from synapse.util import SYNAPSE_VERSION
@@ -474,7 +470,6 @@ def register_threadpool(name: str, threadpool: ThreadPool) -> None:
"Collector",
"MetricsResource",
"generate_latest",
- "start_http_server",
"LaterGauge",
"InFlightGauge",
"GaugeBucketCollector",
diff --git a/synapse/metrics/_legacy_exposition.py b/synapse/metrics/_legacy_exposition.py
deleted file mode 100644
index 1459f9d224b3..000000000000
--- a/synapse/metrics/_legacy_exposition.py
+++ /dev/null
@@ -1,288 +0,0 @@
-# Copyright 2015-2019 Prometheus Python Client Developers
-# Copyright 2019 Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-This code is based off `prometheus_client/exposition.py` from version 0.7.1.
-
-Due to the renaming of metrics in prometheus_client 0.4.0, this customised
-vendoring of the code will emit both the old versions that Synapse dashboards
-expect, and the newer "best practice" version of the up-to-date official client.
-"""
-import logging
-import math
-import threading
-from http.server import BaseHTTPRequestHandler, HTTPServer
-from socketserver import ThreadingMixIn
-from typing import Any, Dict, List, Type, Union
-from urllib.parse import parse_qs, urlparse
-
-from prometheus_client import REGISTRY, CollectorRegistry
-from prometheus_client.core import Sample
-
-from twisted.web.resource import Resource
-from twisted.web.server import Request
-
-logger = logging.getLogger(__name__)
-CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
-
-
-def floatToGoString(d: Union[int, float]) -> str:
- d = float(d)
- if d == math.inf:
- return "+Inf"
- elif d == -math.inf:
- return "-Inf"
- elif math.isnan(d):
- return "NaN"
- else:
- s = repr(d)
- dot = s.find(".")
- # Go switches to exponents sooner than Python.
- # We only need to care about positive values for le/quantile.
- if d > 0 and dot > 6:
- mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.")
- return f"{mantissa}e+0{dot - 1}"
- return s
-
-
-def sample_line(line: Sample, name: str) -> str:
- if line.labels:
- labelstr = "{{{0}}}".format(
- ",".join(
- [
- '{}="{}"'.format(
- k,
- v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
- )
- for k, v in sorted(line.labels.items())
- ]
- )
- )
- else:
- labelstr = ""
- timestamp = ""
- if line.timestamp is not None:
- # Convert to milliseconds.
- timestamp = f" {int(float(line.timestamp) * 1000):d}"
- return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp)
-
-
-# Mapping from new metric names to legacy metric names.
-# We translate these back to their old names when exposing them through our
-# legacy vendored exporter.
-# Only this legacy exposition module applies these name changes.
-LEGACY_METRIC_NAMES = {
- "synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits",
- "synapse_util_caches_cache_size": "synapse_util_caches_cache:size",
- "synapse_util_caches_cache_evicted_size": "synapse_util_caches_cache:evicted_size",
- "synapse_util_caches_cache": "synapse_util_caches_cache:total",
- "synapse_util_caches_response_cache_size": "synapse_util_caches_response_cache:size",
- "synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits",
- "synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size",
- "synapse_util_caches_response_cache": "synapse_util_caches_response_cache:total",
- "synapse_federation_client_sent_pdu_destinations": "synapse_federation_client_sent_pdu_destinations:total",
- "synapse_federation_client_sent_pdu_destinations_count": "synapse_federation_client_sent_pdu_destinations:count",
- "synapse_admin_mau_current": "synapse_admin_mau:current",
- "synapse_admin_mau_max": "synapse_admin_mau:max",
- "synapse_admin_mau_registered_reserved_users": "synapse_admin_mau:registered_reserved_users",
-}
-
-
-def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> bytes:
- """
- Generate metrics in legacy format. Modern metrics are generated directly
- by prometheus-client.
- """
-
- output = []
-
- for metric in registry.collect():
- if not metric.samples:
- # No samples, don't bother.
- continue
-
- # Translate to legacy metric name if it has one.
- mname = LEGACY_METRIC_NAMES.get(metric.name, metric.name)
- mnewname = metric.name
- mtype = metric.type
-
- # OpenMetrics -> Prometheus
- if mtype == "counter":
- mnewname = mnewname + "_total"
- elif mtype == "info":
- mtype = "gauge"
- mnewname = mnewname + "_info"
- elif mtype == "stateset":
- mtype = "gauge"
- elif mtype == "gaugehistogram":
- mtype = "histogram"
- elif mtype == "unknown":
- mtype = "untyped"
-
- # Output in the old format for compatibility.
- if emit_help:
- output.append(
- "# HELP {} {}\n".format(
- mname,
- metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
- )
- )
- output.append(f"# TYPE {mname} {mtype}\n")
-
- om_samples: Dict[str, List[str]] = {}
- for s in metric.samples:
- for suffix in ["_created", "_gsum", "_gcount"]:
- if s.name == mname + suffix:
- # OpenMetrics specific sample, put in a gauge at the end.
- # (these come from gaugehistograms which don't get renamed,
- # so no need to faff with mnewname)
- om_samples.setdefault(suffix, []).append(sample_line(s, s.name))
- break
- else:
- newname = s.name.replace(mnewname, mname)
- if ":" in newname and newname.endswith("_total"):
- newname = newname[: -len("_total")]
- output.append(sample_line(s, newname))
-
- for suffix, lines in sorted(om_samples.items()):
- if emit_help:
- output.append(
- "# HELP {}{} {}\n".format(
- mname,
- suffix,
- metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
- )
- )
- output.append(f"# TYPE {mname}{suffix} gauge\n")
- output.extend(lines)
-
- # Get rid of the weird colon things while we're at it
- if mtype == "counter":
- mnewname = mnewname.replace(":total", "")
- mnewname = mnewname.replace(":", "_")
-
- if mname == mnewname:
- continue
-
- # Also output in the new format, if it's different.
- if emit_help:
- output.append(
- "# HELP {} {}\n".format(
- mnewname,
- metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
- )
- )
- output.append(f"# TYPE {mnewname} {mtype}\n")
-
- for s in metric.samples:
- # Get rid of the OpenMetrics specific samples (we should already have
- # dealt with them above anyway.)
- for suffix in ["_created", "_gsum", "_gcount"]:
- if s.name == mname + suffix:
- break
- else:
- sample_name = LEGACY_METRIC_NAMES.get(s.name, s.name)
- output.append(
- sample_line(s, sample_name.replace(":total", "").replace(":", "_"))
- )
-
- return "".join(output).encode("utf-8")
-
-
-class MetricsHandler(BaseHTTPRequestHandler):
- """HTTP handler that gives metrics from ``REGISTRY``."""
-
- registry = REGISTRY
-
- def do_GET(self) -> None:
- registry = self.registry
- params = parse_qs(urlparse(self.path).query)
-
- if "help" in params:
- emit_help = True
- else:
- emit_help = False
-
- try:
- output = generate_latest(registry, emit_help=emit_help)
- except Exception:
- self.send_error(500, "error generating metric output")
- raise
- try:
- self.send_response(200)
- self.send_header("Content-Type", CONTENT_TYPE_LATEST)
- self.send_header("Content-Length", str(len(output)))
- self.end_headers()
- self.wfile.write(output)
- except BrokenPipeError as e:
- logger.warning(
- "BrokenPipeError when serving metrics (%s). Did Prometheus restart?", e
- )
-
- def log_message(self, format: str, *args: Any) -> None:
- """Log nothing."""
-
- @classmethod
- def factory(cls, registry: CollectorRegistry) -> Type:
- """Returns a dynamic MetricsHandler class tied
- to the passed registry.
- """
- # This implementation relies on MetricsHandler.registry
- # (defined above and defaulted to REGISTRY).
-
- # As we have unicode_literals, we need to create a str()
- # object for type().
- cls_name = str(cls.__name__)
- MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
- return MyMetricsHandler
-
-
-class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
- """Thread per request HTTP server."""
-
- # Make worker threads "fire and forget". Beginning with Python 3.7 this
- # prevents a memory leak because ``ThreadingMixIn`` starts to gather all
- # non-daemon threads in a list in order to join on them at server close.
- # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
- # same as Python 3.7's ``ThreadingHTTPServer``.
- daemon_threads = True
-
-
-def start_http_server(
- port: int, addr: str = "", registry: CollectorRegistry = REGISTRY
-) -> None:
- """Starts an HTTP server for prometheus metrics as a daemon thread"""
- CustomMetricsHandler = MetricsHandler.factory(registry)
- httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
- t = threading.Thread(target=httpd.serve_forever)
- t.daemon = True
- t.start()
-
-
-class MetricsResource(Resource):
- """
- Twisted ``Resource`` that serves prometheus metrics.
- """
-
- isLeaf = True
-
- def __init__(self, registry: CollectorRegistry = REGISTRY):
- self.registry = registry
-
- def render_GET(self, request: Request) -> bytes:
- request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
- response = generate_latest(self.registry)
- request.setHeader(b"Content-Length", str(len(response)))
- return response
diff --git a/synapse/metrics/_twisted_exposition.py b/synapse/metrics/_twisted_exposition.py
new file mode 100644
index 000000000000..0abcd1495383
--- /dev/null
+++ b/synapse/metrics/_twisted_exposition.py
@@ -0,0 +1,38 @@
+# Copyright 2015-2019 Prometheus Python Client Developers
+# Copyright 2019 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from prometheus_client import REGISTRY, CollectorRegistry, generate_latest
+
+from twisted.web.resource import Resource
+from twisted.web.server import Request
+
+CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
+
+
+class MetricsResource(Resource):
+ """
+ Twisted ``Resource`` that serves prometheus metrics.
+ """
+
+ isLeaf = True
+
+ def __init__(self, registry: CollectorRegistry = REGISTRY):
+ self.registry = registry
+
+ def render_GET(self, request: Request) -> bytes:
+ request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
+ response = generate_latest(self.registry)
+ request.setHeader(b"Content-Length", str(len(response)))
+ return response
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index 7a1516d3a89c..9ea4e23b3107 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -174,8 +174,10 @@ def update_metrics(self) -> None:
diff = new_stats - self._reported_stats
self._reported_stats = new_stats
- _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)
- _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)
+ # For unknown reasons, the difference in times can be negative. See comment in
+ # synapse.http.request_metrics.RequestMetrics.update_metrics.
+ _background_process_ru_utime.labels(self.desc).inc(max(diff.ru_utime, 0))
+ _background_process_ru_stime.labels(self.desc).inc(max(diff.ru_stime, 0))
_background_process_db_txn_count.labels(self.desc).inc(diff.db_txn_count)
_background_process_db_txn_duration.labels(self.desc).inc(
diff.db_txn_duration_sec
diff --git a/synapse/metrics/common_usage_metrics.py b/synapse/metrics/common_usage_metrics.py
index 0a22ea3d923c..6e05b043d3cd 100644
--- a/synapse/metrics/common_usage_metrics.py
+++ b/synapse/metrics/common_usage_metrics.py
@@ -54,7 +54,9 @@ async def get_metrics(self) -> CommonUsageMetrics:
async def setup(self) -> None:
"""Keep the gauges for common usage metrics up to date."""
- await self._update_gauges()
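+ # Do the initial update in the background rather than blocking startup on
+ # a potentially slow set of database queries.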
+ run_as_background_process(
+ desc="common_usage_metrics_update_gauges", func=self._update_gauges
+ )
self._clock.looping_call(
run_as_background_process,
5 * 60 * 1000,
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 6a6ae208d157..0092a03c59a7 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -86,6 +86,7 @@
ON_LOGGED_OUT_CALLBACK,
AuthHandler,
)
+from synapse.handlers.device import DeviceHandler
from synapse.handlers.push_rules import RuleSpec, check_actions
from synapse.http.client import SimpleHttpClient
from synapse.http.server import (
@@ -110,7 +111,6 @@
)
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.roommember import ProfileInfo
-from synapse.storage.state import StateFilter
from synapse.types import (
DomainSpecificString,
JsonDict,
@@ -123,6 +123,7 @@
UserProfile,
create_requester,
)
+from synapse.types.state import StateFilter
from synapse.util import Clock
from synapse.util.async_helpers import maybe_awaitable
from synapse.util.caches.descriptors import CachedFunction, cached
@@ -207,6 +208,7 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None:
self._registration_handler = hs.get_registration_handler()
self._send_email_handler = hs.get_send_email_handler()
self._push_rules_handler = hs.get_push_rules_handler()
+ self._device_handler = hs.get_device_handler()
self.custom_template_dir = hs.config.server.custom_template_directory
try:
@@ -771,50 +773,11 @@ async def create_login_token(
auth_provider_session_id: The session ID got during login from the SSO IdP,
if any.
"""
- # The deprecated `generate_short_term_login_token` method defaulted to an empty
- # string for the `auth_provider_id` because of how the underlying macaroon was
- # generated. This will change to a proper NULL-able field when the tokens get
- # moved to the database.
- return self._hs.get_macaroon_generator().generate_short_term_login_token(
+ return await self._hs.get_auth_handler().create_login_token_for_user_id(
user_id,
- auth_provider_id or "",
- auth_provider_session_id,
duration_in_ms,
- )
-
- def generate_short_term_login_token(
- self,
- user_id: str,
- duration_in_ms: int = (2 * 60 * 1000),
- auth_provider_id: str = "",
- auth_provider_session_id: Optional[str] = None,
- ) -> str:
- """Generate a login token suitable for m.login.token authentication
-
- Added in Synapse v1.9.0.
-
- This was deprecated in Synapse v1.69.0 in favor of create_login_token, and will
- be removed in Synapse 1.71.0.
-
- Args:
- user_id: gives the ID of the user that the token is for
-
- duration_in_ms: the time that the token will be valid for
-
- auth_provider_id: the ID of the SSO IdP that the user used to authenticate
- to get this token, if any. This is encoded in the token so that
- /login can report stats on number of successful logins by IdP.
- """
- logger.warn(
- "A module configured on this server uses ModuleApi.generate_short_term_login_token(), "
- "which is deprecated in favor of ModuleApi.create_login_token(), and will be removed in "
- "Synapse 1.71.0",
- )
- return self._hs.get_macaroon_generator().generate_short_term_login_token(
- user_id,
auth_provider_id,
auth_provider_session_id,
- duration_in_ms,
)
@defer.inlineCallbacks
@@ -823,10 +786,12 @@ def invalidate_access_token(
) -> Generator["defer.Deferred[Any]", Any, None]:
"""Invalidate an access token for a user
+ Can only be called from the main process.
+
Added in Synapse v0.25.0.
Args:
- access_token(str): access token
+ access_token: access token
Returns:
twisted.internet.defer.Deferred - resolves once the access token
@@ -835,6 +800,10 @@ def invalidate_access_token(
Raises:
synapse.api.errors.AuthError: the access token is invalid
"""
+ assert isinstance(
+ self._device_handler, DeviceHandler
+ ), "invalidate_access_token can only be called on the main process"
+
# see if the access token corresponds to a device
user_info = yield defer.ensureDeferred(
self._auth.get_user_by_access_token(access_token)
@@ -844,7 +813,7 @@ def invalidate_access_token(
if device_id:
# delete the device, which will also delete its access tokens
yield defer.ensureDeferred(
- self._hs.get_device_handler().delete_devices(user_id, [device_id])
+ self._device_handler.delete_devices(user_id, [device_id])
)
else:
# no associated device. Just delete the access token.
@@ -871,7 +840,7 @@ def run_db_interaction(
**kwargs: named args to be passed to func
Returns:
- Deferred[object]: result of func
+ Result of func
"""
# type-ignore: See https://github.com/python/mypy/issues/8862
return defer.ensureDeferred(
@@ -963,8 +932,7 @@ def get_state_events_in_room(
to represent 'any') of the room state to acquire.
Returns:
- twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]:
- The filtered state events in the room.
+ The filtered state events in the room.
"""
state_ids = yield defer.ensureDeferred(
self._storage_controllers.state.get_current_state_ids(
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index eced182fd571..f27ba64d5365 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -13,18 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import itertools
import logging
from typing import (
TYPE_CHECKING,
Any,
Collection,
Dict,
- Iterable,
List,
Mapping,
Optional,
- Set,
Tuple,
Union,
)
@@ -32,13 +29,14 @@
from prometheus_client import Counter
from synapse.api.constants import MAIN_TIMELINE, EventTypes, Membership, RelationTypes
+from synapse.api.room_versions import PushRuleRoomFlag, RoomVersion
from synapse.event_auth import auth_types_for_event, get_user_power_level
from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext
from synapse.state import POWER_KEY
from synapse.storage.databases.main.roommember import EventIdMembership
-from synapse.storage.state import StateFilter
-from synapse.synapse_rust.push import FilteredPushRules, PushRule, PushRuleEvaluator
+from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator
+from synapse.types.state import StateFilter
from synapse.util.caches import register_cache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_event_for_clients_with_state
@@ -48,7 +46,6 @@
logger = logging.getLogger(__name__)
-
push_rules_invalidation_counter = Counter(
"synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", ""
)
@@ -109,6 +106,9 @@ def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.clock = hs.get_clock()
self._event_auth_handler = hs.get_event_auth_handler()
+ self.should_calculate_push_rules = self.hs.config.push.enable_push
+
+ self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled
self.room_push_rule_cache_metrics = register_cache(
"cache",
@@ -117,9 +117,6 @@ def __init__(self, hs: "HomeServer"):
resizable=False,
)
- # Whether to support MSC3772 is supported.
- self._relations_match_enabled = self.hs.config.experimental.msc3772_enabled
-
async def _get_rules_for_event(
self,
event: EventBase,
@@ -171,8 +168,21 @@ async def _get_rules_for_event(
return rules_by_user
async def _get_power_levels_and_sender_level(
- self, event: EventBase, context: EventContext
+ self,
+ event: EventBase,
+ context: EventContext,
+ event_id_to_event: Mapping[str, EventBase],
) -> Tuple[dict, Optional[int]]:
+ """
+ Given an event and its context, get the power level event relevant to the
+ event and the power level of the event's sender.
+ Args:
+ event: event to check
+ context: context of the event to check
+ event_id_to_event: a mapping of event_id to event for a set of events being
+ batch persisted. This is needed as the sought-after power level event may
+ be in this batch rather than the DB.
+ """
# There are no power levels and sender levels possible to get from outlier
if event.internal_metadata.is_outlier():
return {}, None
@@ -183,15 +193,26 @@ async def _get_power_levels_and_sender_level(
)
pl_event_id = prev_state_ids.get(POWER_KEY)
+ # fastpath: if there's a power level event, that's all we need, and
+ # not having a power level event is an extreme edge case
if pl_event_id:
- # fastpath: if there's a power level event, that's all we need, and
- # not having a power level event is an extreme edge case
- auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)}
+ # Get the power level event from the batch, or fall back to the database.
+ pl_event = event_id_to_event.get(pl_event_id)
+ if pl_event:
+ auth_events = {POWER_KEY: pl_event}
+ else:
+ auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)}
else:
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=False
)
auth_events_dict = await self.store.get_events(auth_events_ids)
+ # Some needed auth events might be in the batch, combine them with those
+ # fetched from the database.
+ for auth_event_id in auth_events_ids:
+ auth_event = event_id_to_event.get(auth_event_id)
+ if auth_event:
+ auth_events_dict[auth_event_id] = auth_event
auth_events = {(e.type, e.state_key): e for e in auth_events_dict.values()}
sender_level = get_user_power_level(event.sender, auth_events)
@@ -200,61 +221,82 @@ async def _get_power_levels_and_sender_level(
return pl_event.content if pl_event else {}, sender_level
- async def _get_mutual_relations(
- self, parent_id: str, rules: Iterable[Tuple[PushRule, bool]]
- ) -> Dict[str, Set[Tuple[str, str]]]:
- """
- Fetch event metadata for events which related to the same event as the given event.
-
- If the given event has no relation information, returns an empty dictionary.
-
- Args:
- parent_id: The event ID which is targeted by relations.
- rules: The push rules which will be processed for this event.
+ async def _related_events(self, event: EventBase) -> Dict[str, Dict[str, str]]:
+ """Fetches the related events for 'event'. Sets the im.vector.is_falling_back key if the event is from a fallback relation
Returns:
- A dictionary of relation type to:
- A set of tuples of:
- The sender
- The event type
+ Mapping of relation type to flattened events.
"""
+ related_events: Dict[str, Dict[str, str]] = {}
+ if self._related_event_match_enabled:
+ related_event_id = event.content.get("m.relates_to", {}).get("event_id")
+ relation_type = event.content.get("m.relates_to", {}).get("rel_type")
+ if related_event_id is not None and relation_type is not None:
+ related_event = await self.store.get_event(
+ related_event_id, allow_none=True
+ )
+ if related_event is not None:
+ related_events[relation_type] = _flatten_dict(related_event)
+
+ reply_event_id = (
+ event.content.get("m.relates_to", {})
+ .get("m.in_reply_to", {})
+ .get("event_id")
+ )
- # If the experimental feature is not enabled, skip fetching relations.
- if not self._relations_match_enabled:
- return {}
+ # convert replies to pseudo relations
+ if reply_event_id is not None:
+ related_event = await self.store.get_event(
+ reply_event_id, allow_none=True
+ )
- # Pre-filter to figure out which relation types are interesting.
- rel_types = set()
- for rule, enabled in rules:
- if not enabled:
- continue
+ if related_event is not None:
+ related_events["m.in_reply_to"] = _flatten_dict(related_event)
- for condition in rule.conditions:
- if condition["kind"] != "org.matrix.msc3772.relation_match":
- continue
+ # indicate that this is from a fallback relation.
+ if relation_type == "m.thread" and event.content.get(
+ "m.relates_to", {}
+ ).get("is_falling_back", False):
+ related_events["m.in_reply_to"][
+ "im.vector.is_falling_back"
+ ] = ""
- # rel_type is required.
- rel_type = condition.get("rel_type")
- if rel_type:
- rel_types.add(rel_type)
+ return related_events
- # If no valid rules were found, no mutual relations.
- if not rel_types:
- return {}
-
- # If any valid rules were found, fetch the mutual relations.
- return await self.store.get_mutual_event_relations(parent_id, rel_types)
+ async def action_for_events_by_user(
+ self, events_and_context: List[Tuple[EventBase, EventContext]]
+ ) -> None:
+ """Given a list of events and their associated contexts, evaluate the push rules
+ for each event, check if the message should increment the unread count, and
+ insert the results into the event_push_actions_staging table.
+ """
+ if not self.should_calculate_push_rules:
+ return
+ # For batched events the power level events may not have been persisted yet,
+ # so we pass the batched events in; if an event cannot be found in the
+ # database we can then check the batch.
+ event_id_to_event = {e.event_id: e for e, _ in events_and_context}
+ for event, context in events_and_context:
+ await self._action_for_event_by_user(event, context, event_id_to_event)
@measure_func("action_for_event_by_user")
- async def action_for_event_by_user(
- self, event: EventBase, context: EventContext
+ async def _action_for_event_by_user(
+ self,
+ event: EventBase,
+ context: EventContext,
+ event_id_to_event: Mapping[str, EventBase],
) -> None:
- """Given an event and context, evaluate the push rules, check if the message
- should increment the unread count, and insert the results into the
- event_push_actions_staging table.
- """
- if not event.internal_metadata.is_notifiable():
- # Push rules for events that aren't notifiable can't be processed by this
+
+ if (
+ not event.internal_metadata.is_notifiable()
+ or event.internal_metadata.is_historical()
+ ):
+ # Push rules for events that aren't notifiable can't be processed by this
+ # method, and we want to skip push notification actions for historical
+ # messages because we don't want to notify people about old history back in time.
+ # The historical messages also do not have the proper `context.current_state_ids`
+ # and `state_groups` because they have `prev_events` that aren't persisted yet
+ # (historical messages persisted in reverse-chronological order).
return
# Disable counting as unread unless the experimental configuration is
@@ -274,25 +316,24 @@ async def action_for_event_by_user(
(
power_levels,
sender_power_level,
- ) = await self._get_power_levels_and_sender_level(event, context)
+ ) = await self._get_power_levels_and_sender_level(
+ event, context, event_id_to_event
+ )
+ # Find the event's thread ID.
relation = relation_from_event(event)
- # If the event does not have a relation, then cannot have any mutual
- # relations or thread ID.
- relations = {}
+ # If the event does not have a relation, then it cannot have a thread ID.
thread_id = MAIN_TIMELINE
if relation:
- relations = await self._get_mutual_relations(
- relation.parent_id,
- itertools.chain(*(r.rules() for r in rules_by_user.values())),
- )
# Recursively attempt to find the thread this event relates to.
if relation.rel_type == RelationTypes.THREAD:
thread_id = relation.parent_id
else:
# Since the event has not yet been persisted we check whether
# the parent is part of a thread.
- thread_id = await self.store.get_thread_id(relation.parent_id) or "main"
+ thread_id = await self.store.get_thread_id(relation.parent_id)
+
+ related_events = await self._related_events(event)
# It's possible that old room versions have non-integer power levels (floats or
# strings). Workaround this by explicitly converting to int.
@@ -302,12 +343,14 @@ async def action_for_event_by_user(
notification_levels[user_id] = int(level)
evaluator = PushRuleEvaluator(
- _flatten_dict(event),
+ _flatten_dict(event, room_version=event.room_version),
room_member_count,
sender_power_level,
notification_levels,
- relations,
- self._relations_match_enabled,
+ related_events,
+ self._related_event_match_enabled,
+ event.room_version.msc3931_push_features,
+ self.hs.config.experimental.msc1767_enabled, # MSC3931 flag
)
users = rules_by_user.keys()
@@ -383,6 +426,7 @@ async def action_for_event_by_user(
def _flatten_dict(
d: Union[EventBase, Mapping[str, Any]],
+ room_version: Optional[RoomVersion] = None,
prefix: Optional[List[str]] = None,
result: Optional[Dict[str, str]] = None,
) -> Dict[str, str]:
@@ -394,6 +438,31 @@ def _flatten_dict(
if isinstance(value, str):
result[".".join(prefix + [key])] = value.lower()
elif isinstance(value, Mapping):
+ # do not set `room_version` due to recursion considerations below
_flatten_dict(value, prefix=(prefix + [key]), result=result)
+ # `room_version` should only ever be set when looking at the top level of an event
+ if (
+ room_version is not None
+ and PushRuleRoomFlag.EXTENSIBLE_EVENTS in room_version.msc3931_push_features
+ and isinstance(d, EventBase)
+ ):
+ # Room supports extensible events: replace `content.body` with the plain text
+ # representation from `m.markup`, as per MSC1767.
+ markup = d.get("content").get("m.markup")
+ if room_version.identifier.startswith("org.matrix.msc1767."):
+ markup = d.get("content").get("org.matrix.msc1767.markup")
+ if markup is not None and isinstance(markup, list):
+ text = ""
+ for rep in markup:
+ if not isinstance(rep, dict):
+ # invalid markup - skip all processing
+ break
+ if rep.get("mimetype", "text/plain") == "text/plain":
+ rep_text = rep.get("body")
+ if rep_text is not None and isinstance(rep_text, str):
+ text = rep_text.lower()
+ break
+ result["content.body"] = text
+
return result
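The markup scan above is the subtle part of the hunk: it prefers the first `text/plain` representation, lowercases its body, and treats any non-dict entry as invalid markup (leaving an empty body). A standalone sketch of the same logic, using hypothetical data rather than Synapse's event types:

```python
from typing import Any, List

def plain_text_from_markup(markup: List[Any]) -> str:
    """Return the lowercased body of the first text/plain representation,
    or the empty string if none is found or the markup is malformed."""
    text = ""
    for rep in markup:
        if not isinstance(rep, dict):
            # Invalid markup: skip all processing, as the evaluator does.
            break
        if rep.get("mimetype", "text/plain") == "text/plain":
            body = rep.get("body")
            if isinstance(body, str):
                text = body.lower()
                break
    return text

# MSC1767-style content with two representations; mimetype defaults to text/plain.
markup = [{"mimetype": "text/html", "body": "<b>Hi</b>"}, {"body": "Hi"}]
assert plain_text_from_markup(markup) == "hi"
```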
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index 7095ae83f931..622a1e35c5fe 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -44,6 +44,12 @@ def format_push_rules_for_user(
rulearray.append(template_rule)
+ pattern_type = template_rule.pop("pattern_type", None)
+ if pattern_type == "user_id":
+ template_rule["pattern"] = user.to_string()
+ elif pattern_type == "user_localpart":
+ template_rule["pattern"] = user.localpart
+
template_rule["enabled"] = enabled
if "conditions" not in template_rule:
@@ -93,10 +99,14 @@ def _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]:
if len(rule.conditions) != 1:
return None
thecond = rule.conditions[0]
- if "pattern" not in thecond:
- return None
+
templaterule = {"actions": rule.actions}
- templaterule["pattern"] = thecond["pattern"]
+ if "pattern" in thecond:
+ templaterule["pattern"] = thecond["pattern"]
+ elif "pattern_type" in thecond:
+ templaterule["pattern_type"] = thecond["pattern_type"]
+ else:
+ return None
else:
# This should not be reached unless this function is not kept in sync
# with PRIORITY_CLASS_INVERSE_MAP.
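Taken together, the two hunks above let the server store a single per-user placeholder rule and substitute the concrete pattern at serving time. A toy illustration of the substitution step, with plain dicts standing in for Synapse's rule objects:

```python
from typing import Any, Dict

def fill_pattern(template_rule: Dict[str, Any], user_id: str, localpart: str) -> Dict[str, Any]:
    # Mirrors the substitution in format_push_rules_for_user.
    pattern_type = template_rule.pop("pattern_type", None)
    if pattern_type == "user_id":
        template_rule["pattern"] = user_id
    elif pattern_type == "user_localpart":
        template_rule["pattern"] = localpart
    return template_rule

rule = {"actions": ["notify"], "pattern_type": "user_localpart"}
assert fill_pattern(rule, "@alice:example.org", "alice") == {
    "actions": ["notify"],
    "pattern": "alice",
}
```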
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index c2575ba3d94d..93b255ced579 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -37,8 +37,8 @@
TemplateVars,
)
from synapse.storage.databases.main.event_push_actions import EmailPushAction
-from synapse.storage.state import StateFilter
from synapse.types import StateMap, UserID
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import concurrently_execute
from synapse.visibility import filter_events_for_client
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index edeba27a4553..7ee07e4beebd 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -17,7 +17,6 @@
from synapse.push.presentable_names import calculate_room_name, name_from_member_event
from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore
-from synapse.util.async_helpers import concurrently_execute
async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int:
@@ -26,23 +25,12 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
badge = len(invites)
- room_notifs = []
-
- async def get_room_unread_count(room_id: str) -> None:
- room_notifs.append(
- await store.get_unread_event_push_actions_by_room_for_user(
- room_id,
- user_id,
- )
- )
-
- await concurrently_execute(get_room_unread_count, joins, 10)
-
- for notifs in room_notifs:
- # Combine the counts from all the threads.
- notify_count = notifs.main_timeline.notify_count + sum(
- n.notify_count for n in notifs.threads.values()
- )
+ room_to_count = await store.get_unread_counts_by_room_for_user(user_id)
+ for room_id, notify_count in room_to_count.items():
+ # room_to_count may include rooms which the user has left,
+ # ignore those.
+ if room_id not in joins:
+ continue
if notify_count == 0:
continue
@@ -51,8 +39,10 @@ async def get_room_unread_count(room_id: str) -> None:
# return one badge count per conversation
badge += 1
else:
- # increment the badge count by the number of unread messages in the room
+ # Increase badge by number of notifications in room
+ # NOTE: this includes threaded and unthreaded notifications.
badge += notify_count
+
return badge
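The rework above replaces one query per joined room with a single bulk query, then filters in Python. The counting rule is easy to state in isolation; a minimal sketch under the same assumptions (`room_to_count` maps room IDs to notification counts, `joins` is the set of joined rooms):

```python
from typing import Dict, Set

def badge_from_counts(
    invites: int, room_to_count: Dict[str, int], joins: Set[str], group_by_room: bool
) -> int:
    badge = invites
    for room_id, notify_count in room_to_count.items():
        # The bulk query may include rooms the user has left; ignore those.
        if room_id not in joins or notify_count == 0:
            continue
        if group_by_room:
            badge += 1  # one badge per conversation
        else:
            badge += notify_count  # threaded and unthreaded notifications
    return badge

assert badge_from_counts(1, {"!a": 3, "!b": 0, "!gone": 5}, {"!a", "!b"}, False) == 4
assert badge_from_counts(1, {"!a": 3, "!b": 0, "!gone": 5}, {"!a", "!b"}, True) == 2
```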
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index acb0bd18f72e..3f4d3fc51ae3 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -153,7 +153,7 @@ async def _serialize_payload(**kwargs) -> JsonDict:
argument list.
Returns:
- dict: If POST/PUT request then dictionary must be JSON serialisable,
+ If POST/PUT request then dictionary must be JSON serialisable,
otherwise must be appropriate for adding as query args.
"""
return {}
@@ -184,8 +184,10 @@ def make_client(cls, hs: "HomeServer") -> Callable:
client = hs.get_simple_http_client()
local_instance_name = hs.get_instance_name()
+ # The values of these options should match the replication listener settings.
master_host = hs.config.worker.worker_replication_host
master_port = hs.config.worker.worker_replication_http_port
+ master_tls = hs.config.worker.worker_replication_http_tls
instance_map = hs.config.worker.instance_map
@@ -205,9 +207,11 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any:
if instance_name == "master":
host = master_host
port = master_port
+ tls = master_tls
elif instance_name in instance_map:
host = instance_map[instance_name].host
port = instance_map[instance_name].port
+ tls = instance_map[instance_name].tls
else:
raise Exception(
"Instance %r not in 'instance_map' config" % (instance_name,)
@@ -238,7 +242,11 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any:
"Unknown METHOD on %s replication endpoint" % (cls.NAME,)
)
- uri = "http://%s:%s/_synapse/replication/%s/%s" % (
+ # The scheme is hard-coded as http by default, or https when the replication
+ # port is configured with TLS enabled.
+ scheme = "https" if tls else "http"
+ uri = "%s://%s:%s/_synapse/replication/%s/%s" % (
+ scheme,
host,
port,
cls.NAME,
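The effect of the hunk is that replication URIs are built from a scheme derived from the TLS flag rather than a hard-coded `http`. A sketch of the resulting construction (argument names are illustrative):

```python
def replication_uri(host: str, port: int, tls: bool, endpoint: str, path: str) -> str:
    # https only when the target's replication listener is configured with TLS.
    scheme = "https" if tls else "http"
    return "%s://%s:%s/_synapse/replication/%s/%s" % (scheme, host, port, endpoint, path)

assert (
    replication_uri("worker1", 9093, False, "upload_keys_for_user", "txn-1")
    == "http://worker1:9093/_synapse/replication/upload_keys_for_user/txn-1"
)
```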
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py
index 3d63645726b9..7c4941c3d3f5 100644
--- a/synapse/replication/http/devices.py
+++ b/synapse/replication/http/devices.py
@@ -13,11 +13,12 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Tuple
+from typing import TYPE_CHECKING, Optional, Tuple
from twisted.web.server import Request
from synapse.http.server import HttpServer
+from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict
@@ -62,7 +63,12 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
- self.device_list_updater = hs.get_device_handler().device_list_updater
+ from synapse.handlers.device import DeviceHandler
+
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.device_list_updater = handler.device_list_updater
+
self.store = hs.get_datastores().main
self.clock = hs.get_clock()
@@ -72,11 +78,77 @@ async def _serialize_payload(user_id: str) -> JsonDict: # type: ignore[override
async def _handle_request( # type: ignore[override]
self, request: Request, user_id: str
- ) -> Tuple[int, JsonDict]:
+ ) -> Tuple[int, Optional[JsonDict]]:
user_devices = await self.device_list_updater.user_device_resync(user_id)
return 200, user_devices
+class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
+ """Ask master to upload keys for the user and send them out over federation to
+ update other servers.
+
+ For now, only the master is permitted to handle key upload requests;
+ any worker can handle key query requests (since they're read-only).
+
+ Calls e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys) on
+ the main process to accomplish this.
+
+ Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
+ Request format (borrowed and expanded from KeyUploadServlet):
+
+ POST /_synapse/replication/upload_keys_for_user
+
+ {
+ "user_id": "",
+ "device_id": "",
+ "keys": {
+ ....this part can be found in KeyUploadServlet in rest/client/keys.py....
+ }
+ }
+
+ Response is equivalent to `/_matrix/client/v3/keys/upload` as found in KeyUploadServlet.
+
+ """
+
+ NAME = "upload_keys_for_user"
+ PATH_ARGS = ()
+ CACHE = False
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+
+ self.e2e_keys_handler = hs.get_e2e_keys_handler()
+ self.store = hs.get_datastores().main
+ self.clock = hs.get_clock()
+
+ @staticmethod
+ async def _serialize_payload( # type: ignore[override]
+ user_id: str, device_id: str, keys: JsonDict
+ ) -> JsonDict:
+
+ return {
+ "user_id": user_id,
+ "device_id": device_id,
+ "keys": keys,
+ }
+
+ async def _handle_request( # type: ignore[override]
+ self, request: Request
+ ) -> Tuple[int, JsonDict]:
+ content = parse_json_object_from_request(request)
+
+ user_id = content["user_id"]
+ device_id = content["device_id"]
+ keys = content["keys"]
+
+ results = await self.e2e_keys_handler.upload_keys_for_user(
+ user_id, device_id, keys
+ )
+
+ return 200, results
+
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
+ ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
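For clarity, the payload the new endpoint carries is exactly the three serialized fields; a sketch of what `_serialize_payload` produces for a typical upload (the key contents here are placeholders, not a real device's keys):

```python
# Shape of the replication payload, as built by _serialize_payload.
payload = {
    "user_id": "@alice:example.org",
    "device_id": "JLAFKJWSCS",
    "keys": {
        # Same shape as the body of POST /_matrix/client/v3/keys/upload,
        # e.g. "device_keys", "one_time_keys", "fallback_keys".
        "one_time_keys": {"signed_curve25519:AAAAHQ": {"key": "..."}},
    },
}

# The main process unpacks these fields and calls
# e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys),
# returning that handler's result as the HTTP response body.
```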
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py
index 61abb529c8ae..976c2833603d 100644
--- a/synapse/replication/http/register.py
+++ b/synapse/replication/http/register.py
@@ -39,6 +39,16 @@ def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.registration_handler = hs.get_registration_handler()
+ # Default value if the worker that sent the replication request did not include
+ # an 'approved' property.
+ if (
+ hs.config.experimental.msc3866.enabled
+ and hs.config.experimental.msc3866.require_approval_for_new_accounts
+ ):
+ self._approval_default = False
+ else:
+ self._approval_default = True
+
@staticmethod
async def _serialize_payload( # type: ignore[override]
user_id: str,
@@ -92,6 +102,12 @@ async def _handle_request( # type: ignore[override]
await self.registration_handler.check_registration_ratelimit(content["address"])
+ # Always default admin users to approved (since it means they were created by
+ # an admin).
+ approved_default = self._approval_default
+ if content["admin"]:
+ approved_default = True
+
await self.registration_handler.register_with_store(
user_id=user_id,
password_hash=content["password_hash"],
@@ -103,7 +119,7 @@ async def _handle_request( # type: ignore[override]
user_type=content["user_type"],
address=content["address"],
shadow_banned=content["shadow_banned"],
- approved=content["approved"],
+ approved=content.get("approved", approved_default),
)
return 200, {}
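The default-approval decision above is a small pure function of the MSC3866 config and the admin flag; spelled out as a sketch:

```python
def default_approved(msc3866_enabled: bool, require_approval: bool, is_admin: bool) -> bool:
    # Admin users are always approved, since they were created by an admin.
    if is_admin:
        return True
    # Everyone else starts unapproved only when approval is required.
    return not (msc3866_enabled and require_approval)

assert default_approved(True, True, False) is False   # needs manual approval
assert default_approved(True, True, True) is True     # admins bypass approval
assert default_approved(False, False, False) is True  # feature disabled
```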
diff --git a/synapse/replication/slave/__init__.py b/synapse/replication/slave/__init__.py
deleted file mode 100644
index f43a360a807c..000000000000
--- a/synapse/replication/slave/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/replication/slave/storage/__init__.py b/synapse/replication/slave/storage/__init__.py
deleted file mode 100644
index f43a360a807c..000000000000
--- a/synapse/replication/slave/storage/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py
deleted file mode 100644
index 8f3f953ed474..000000000000
--- a/synapse/replication/slave/storage/_slaved_id_tracker.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import List, Optional, Tuple
-
-from synapse.storage.database import LoggingDatabaseConnection
-from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id
-
-
-class SlavedIdTracker(AbstractStreamIdTracker):
- """Tracks the "current" stream ID of a stream with a single writer.
-
- See `AbstractStreamIdTracker` for more details.
-
- Note that this class does not work correctly when there are multiple
- writers.
- """
-
- def __init__(
- self,
- db_conn: LoggingDatabaseConnection,
- table: str,
- column: str,
- extra_tables: Optional[List[Tuple[str, str]]] = None,
- step: int = 1,
- ):
- self.step = step
- self._current = _load_current_id(db_conn, table, column, step)
- if extra_tables:
- for table, column in extra_tables:
- self.advance(None, _load_current_id(db_conn, table, column))
-
- def advance(self, instance_name: Optional[str], new_id: int) -> None:
- self._current = (max if self.step > 0 else min)(self._current, new_id)
-
- def get_current_token(self) -> int:
- return self._current
-
- def get_current_token_for_writer(self, instance_name: str) -> int:
- return self.get_current_token()
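Before it disappears, the deleted tracker's `advance` semantics are worth noting: for a forward stream it keeps the maximum ID seen, and for a backward stream (negative step) the minimum, so stale replication updates can never move the token backwards. A toy reconstruction:

```python
class MiniIdTracker:
    """Toy single-writer version of the removed SlavedIdTracker."""

    def __init__(self, current: int, step: int = 1) -> None:
        self.step = step
        self._current = current

    def advance(self, new_id: int) -> None:
        # Forward streams only grow; backward streams (step < 0) only shrink.
        self._current = (max if self.step > 0 else min)(self._current, new_id)

    def get_current_token(self) -> int:
        return self._current

tracker = MiniIdTracker(10)
tracker.advance(15)
tracker.advance(12)  # stale update: ignored
assert tracker.get_current_token() == 15
```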
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
deleted file mode 100644
index 6fcade510aac..000000000000
--- a/synapse/replication/slave/storage/devices.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import TYPE_CHECKING, Any, Iterable
-
-from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
-from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.devices import DeviceWorkerStore
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-
-class SlavedDeviceStore(DeviceWorkerStore):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- self.hs = hs
-
- self._device_list_id_gen = SlavedIdTracker(
- db_conn,
- "device_lists_stream",
- "stream_id",
- extra_tables=[
- ("user_signature_stream", "stream_id"),
- ("device_lists_outbound_pokes", "stream_id"),
- ("device_lists_changes_in_room", "stream_id"),
- ],
- )
-
- super().__init__(database, db_conn, hs)
-
- def get_device_stream_token(self) -> int:
- return self._device_list_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == DeviceListsStream.NAME:
- self._device_list_id_gen.advance(instance_name, token)
- self._invalidate_caches_for_devices(token, rows)
- elif stream_name == UserSignatureStream.NAME:
- self._device_list_id_gen.advance(instance_name, token)
- for row in rows:
- self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
-
- def _invalidate_caches_for_devices(
- self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow]
- ) -> None:
- for row in rows:
- # The entities are either user IDs (starting with '@') whose devices
- # have changed, or remote servers that we need to tell about
- # changes.
- if row.entity.startswith("@"):
- self._device_list_stream_cache.entity_has_changed(row.entity, token)
- self.get_cached_devices_for_user.invalidate((row.entity,))
- self._get_cached_user_device.invalidate((row.entity,))
- self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))
-
- else:
- self._device_list_federation_stream_cache.entity_has_changed(
- row.entity, token
- )
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
deleted file mode 100644
index fe47778cb127..000000000000
--- a/synapse/replication/slave/storage/events.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import logging
-from typing import TYPE_CHECKING
-
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
-from synapse.storage.databases.main.event_push_actions import (
- EventPushActionsWorkerStore,
-)
-from synapse.storage.databases.main.events_worker import EventsWorkerStore
-from synapse.storage.databases.main.relations import RelationsWorkerStore
-from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
-from synapse.storage.databases.main.signatures import SignatureWorkerStore
-from synapse.storage.databases.main.state import StateGroupWorkerStore
-from synapse.storage.databases.main.stream import StreamWorkerStore
-from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
-from synapse.util.caches.stream_change_cache import StreamChangeCache
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-# So, um, we want to borrow a load of functions intended for reading from
-# a DataStore, but we don't want to take functions that either write to the
-# DataStore or are cached and don't have cache invalidation logic.
-#
-# Rather than write duplicate versions of those functions, or lift them to
-# a common base class, we going to grab the underlying __func__ object from
-# the method descriptor on the DataStore and chuck them into our class.
-
-
-class SlavedEventStore(
- EventFederationWorkerStore,
- RoomMemberWorkerStore,
- EventPushActionsWorkerStore,
- StreamWorkerStore,
- StateGroupWorkerStore,
- SignatureWorkerStore,
- EventsWorkerStore,
- UserErasureWorkerStore,
- RelationsWorkerStore,
-):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- super().__init__(database, db_conn, hs)
-
- events_max = self._stream_id_gen.get_current_token()
- curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
- db_conn,
- "current_state_delta_stream",
- entity_column="room_id",
- stream_column="stream_id",
- max_value=events_max, # As we share the stream id with events token
- limit=1000,
- )
- self._curr_state_delta_stream_cache = StreamChangeCache(
- "_curr_state_delta_stream_cache",
- min_curr_state_delta_id,
- prefilled_cache=curr_state_delta_prefill,
- )
diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py
deleted file mode 100644
index 5e65eaf1e084..000000000000
--- a/synapse/replication/slave/storage/push_rule.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2015, 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Any, Iterable
-
-from synapse.replication.tcp.streams import PushRulesStream
-from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
-
-from .events import SlavedEventStore
-
-
-class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore):
- def get_max_push_rules_stream_id(self) -> int:
- return self._push_rules_stream_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == PushRulesStream.NAME:
- self._push_rules_stream_id_gen.advance(instance_name, token)
- for row in rows:
- self.get_push_rules_for_user.invalidate((row.user_id,))
- self.push_rules_stream_cache.entity_has_changed(row.user_id, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py
deleted file mode 100644
index 44ed20e4243e..000000000000
--- a/synapse/replication/slave/storage/pushers.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import TYPE_CHECKING, Any, Iterable
-
-from synapse.replication.tcp.streams import PushersStream
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
-from synapse.storage.databases.main.pusher import PusherWorkerStore
-
-from ._slaved_id_tracker import SlavedIdTracker
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-
-class SlavedPusherStore(PusherWorkerStore):
- def __init__(
- self,
- database: DatabasePool,
- db_conn: LoggingDatabaseConnection,
- hs: "HomeServer",
- ):
- super().__init__(database, db_conn, hs)
- self._pushers_id_gen = SlavedIdTracker( # type: ignore
- db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
- )
-
- def get_pushers_stream_token(self) -> int:
- return self._pushers_id_gen.get_current_token()
-
- def process_replication_rows(
- self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
- ) -> None:
- if stream_name == PushersStream.NAME:
- self._pushers_id_gen.advance(instance_name, token)
- return super().process_replication_rows(stream_name, instance_name, token, rows)
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 18252a2958dc..b4dad47b45ad 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -36,12 +36,14 @@
TagAccountDataStream,
ToDeviceStream,
TypingStream,
+ UnPartialStatedRoomStream,
)
from synapse.replication.tcp.streams.events import (
EventsStream,
EventsStreamEventRow,
EventsStreamRow,
)
+from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStreamRow
from synapse.types import PersistedEventPosition, ReadReceipt, StreamKeyType, UserID
from synapse.util.async_helpers import Linearizer, timeout_deferred
from synapse.util.metrics import Measure
@@ -117,6 +119,7 @@ def __init__(self, hs: "HomeServer"):
self._streams = hs.get_replication_streams()
self._instance_name = hs.get_instance_name()
self._typing_handler = hs.get_typing_handler()
+ self._state_storage_controller = hs.get_storage_controllers().state
self._notify_pushers = hs.config.worker.start_pushers
self._pusher_pool = hs.get_pusherpool()
@@ -236,6 +239,14 @@ async def on_rdata(
self.notifier.notify_user_joined_room(
row.data.event_id, row.data.room_id
)
+ elif stream_name == UnPartialStatedRoomStream.NAME:
+ for row in rows:
+ assert isinstance(row, UnPartialStatedRoomStreamRow)
+
+ # Wake up any tasks waiting for the room to be un-partial-stated.
+ self._state_storage_controller.notify_room_un_partial_stated(
+ row.room_id
+ )
await self._presence_handler.process_replication_rows(
stream_name, instance_name, token, rows
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 7763ffb2d0c7..56a5c21910d9 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -245,7 +245,7 @@ def lineReceived(self, line: bytes) -> None:
self._parse_and_dispatch_line(line)
def _parse_and_dispatch_line(self, line: bytes) -> None:
- if line.strip() == "":
+ if line.strip() == b"":
# Ignore blank lines
return
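This one-character fix matters because in Python 3 a str never compares equal to bytes, so the old check could never be true and blank lines were never skipped; a quick demonstration:

```python
line = b"\r\n"               # lineReceived receives bytes off the wire
print(line.strip() == "")    # False: str vs bytes are never equal
print(line.strip() == b"")   # True: blank lines are now detected
```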
diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py
index b1cd55bf6ff5..8575666d9ce1 100644
--- a/synapse/replication/tcp/streams/__init__.py
+++ b/synapse/replication/tcp/streams/__init__.py
@@ -42,6 +42,7 @@
)
from synapse.replication.tcp.streams.events import EventsStream
from synapse.replication.tcp.streams.federation import FederationStream
+from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStream
STREAMS_MAP = {
stream.NAME: stream
@@ -61,6 +62,7 @@
TagAccountDataStream,
AccountDataStream,
UserSignatureStream,
+ UnPartialStatedRoomStream,
)
}
@@ -80,4 +82,5 @@
"TagAccountDataStream",
"AccountDataStream",
"UserSignatureStream",
+ "UnPartialStatedRoomStream",
]
diff --git a/synapse/replication/tcp/streams/partial_state.py b/synapse/replication/tcp/streams/partial_state.py
new file mode 100644
index 000000000000..18f087ffa251
--- /dev/null
+++ b/synapse/replication/tcp/streams/partial_state.py
@@ -0,0 +1,48 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+import attr
+
+from synapse.replication.tcp.streams import Stream
+from synapse.replication.tcp.streams._base import current_token_without_instance
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class UnPartialStatedRoomStreamRow:
+ # ID of the room that has been un-partial-stated.
+ room_id: str
+
+
+class UnPartialStatedRoomStream(Stream):
+ """
+ Stream to notify about rooms becoming un-partial-stated;
+ that is, when the background sync finishes such that we now have full state for
+ the room.
+ """
+
+ NAME = "un_partial_stated_room"
+ ROW_TYPE = UnPartialStatedRoomStreamRow
+
+ def __init__(self, hs: "HomeServer"):
+ store = hs.get_datastores().main
+ super().__init__(
+ hs.get_instance_name(),
+ # TODO(faster_joins, multiple writers): we need to account for instance names
+ current_token_without_instance(store.get_un_partial_stated_rooms_token),
+ store.get_un_partial_stated_rooms_from_stream,
+ )
diff --git a/synapse/res/templates/_base.html b/synapse/res/templates/_base.html
new file mode 100644
index 000000000000..4b5cc7bcb62a
--- /dev/null
+++ b/synapse/res/templates/_base.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="utf-8">
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>{% block title %}{% endblock %}</title>
+    <style type="text/css">{% block style %}{% endblock %}</style>
+    {% block header %}{% endblock %}
+</head>
+<body>
+<header class="mx_Header">
+    {% if app_name == "Riot" %}
+        <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]" style="float: left;">
+    {% elif app_name == "Vector" %}
+        <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]" style="float: left;">
+    {% elif app_name == "Element" %}
+        <img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]" style="float: left;">
+    {% else %}
+        <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]" style="float: left; margin: 10px 19px 7px 0">
+    {% endif %}
+</header>
+
+{% block body %}{% endblock %}
+
+</body>
+</html>
diff --git a/synapse/res/templates/account_previously_renewed.html b/synapse/res/templates/account_previously_renewed.html
index bd4f7cea97a0..91582a8af006 100644
--- a/synapse/res/templates/account_previously_renewed.html
+++ b/synapse/res/templates/account_previously_renewed.html
@@ -1,12 +1,6 @@
-<html lang="en">
-<head>
-<meta charset="UTF-8">
-<meta http-equiv="X-UA-Compatible" content="IE=edge">
-<meta name="viewport" content="width=device-width, initial-scale=1.0">
-<title>
-    Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.
-</title>
-<body>
-    Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.
-</body>
-</html>
\ No newline at end of file
+{% extends "_base.html" %}
+{% block title %}Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.{% endblock %}
+
+{% block body %}
+<p>Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</p>
+{% endblock %}
diff --git a/synapse/res/templates/account_renewed.html b/synapse/res/templates/account_renewed.html
index 57b319f37584..18a57833f1f0 100644
--- a/synapse/res/templates/account_renewed.html
+++ b/synapse/res/templates/account_renewed.html
@@ -1,12 +1,6 @@
-<html lang="en">
-<head>
-<meta charset="UTF-8">
-<meta http-equiv="X-UA-Compatible" content="IE=edge">
-<meta name="viewport" content="width=device-width, initial-scale=1.0">
-<title>
-    Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.
-</title>
-<body>
-    Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.
-</body>
-</html>
\ No newline at end of file
+{% extends "_base.html" %}
+{% block title %}Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.{% endblock %}
+
+{% block body %}
+<p>Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}.</p>
+{% endblock %}
diff --git a/synapse/res/templates/add_threepid.html b/synapse/res/templates/add_threepid.html
index 71f2215b7aea..33c883936ac1 100644
--- a/synapse/res/templates/add_threepid.html
+++ b/synapse/res/templates/add_threepid.html
@@ -1,14 +1,8 @@
-<html lang="en">
-<head>
-<meta charset="UTF-8">
-<meta http-equiv="X-UA-Compatible" content="IE=edge">
-<meta name="viewport" content="width=device-width, initial-scale=1.0">
-<title>
-    Request to add an email address to your Matrix account
-</title>
-</head>
-<body>
 <p>A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:</p>
diff --git a/synapse/res/templates/sso_account_deactivated.html b/synapse/res/templates/sso_account_deactivated.html
--- a/synapse/res/templates/sso_account_deactivated.html
+++ b/synapse/res/templates/sso_account_deactivated.html
-            Your account might have been deactivated by the server administrator.
-            You can either try to create a new account or contact the server’s
-            administrator.
-        </p>
-    </div>
-    {% include "sso_footer.html" without context %}
-</body>
-</html>
+{% extends "_base.html" %}
+{% block title %}SSO account deactivated{% endblock %}
+
+{% block header %}
+<style type="text/css">
+    {% include "sso.css" without context %}
+</style>
+{% endblock %}
+
+{% block body %}
+<div class="error_page">
+    <header>
+        <h1>Your account has been deactivated</h1>
+        <p>
+            <strong>No account found</strong>
+        </p>
+        <p>
+            Your account might have been deactivated by the server administrator.
+            You can either try to create a new account or contact the server’s
+            administrator.
+        </p>
+    </header>
+</div>
+{% include "sso_footer.html" without context %}
+{% endblock %}
diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html
index 2d1db386e1b4..11636d7f5d87 100644
--- a/synapse/res/templates/sso_auth_account_details.html
+++ b/synapse/res/templates/sso_auth_account_details.html
@@ -1,189 +1,186 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <title>Create your account</title>
-
-
-
-
-
-
-
-
-
+<h1>Create your account</h1>
+<p>This is required. Continue to create your account on {{ server_name }}. You can't change this later.</p>
This is required. Continue to create your account on {{ server_name }}. You can't change this later.