diff --git a/.aegir.js b/.aegir.js new file mode 100644 index 0000000..5e4bd8a --- /dev/null +++ b/.aegir.js @@ -0,0 +1,76 @@ +'use strict' + +const Libp2p = require('libp2p') +const { MULTIADDRS_WEBSOCKETS } = require('./test/fixtures/browser') +const Peers = require('./test/fixtures/peers') +const docker = require('./mysql-local/docker') +const PeerId = require('peer-id') +const WebSockets = require('libp2p-websockets') +const Muxer = require('libp2p-mplex') +const { NOISE: Crypto } = require('libp2p-noise') + +const isCI = require('is-ci') + +let libp2p +let containerId + +const before = async () => { + // Use the last peer + const peerId = await PeerId.createFromJSON(Peers[Peers.length - 1]) + + libp2p = new Libp2p({ + addresses: { + listen: [MULTIADDRS_WEBSOCKETS[0]] + }, + peerId, + modules: { + transport: [WebSockets], + streamMuxer: [Muxer], + connEncryption: [Crypto] + }, + config: { + relay: { + enabled: true, + hop: { + enabled: true, + active: false + } + } + } + }) + + await libp2p.start() + + // TODO: if not running test suite in Node, can also stop here + // https://github.com/ipfs/aegir/issues/707 + // CI runs own datastore service + if (isCI) { + return + } + + containerId = await docker.start() +} + +const after = async () => { + await libp2p.stop() + + if (isCI || !containerId) { + return + } + + docker.stop(containerId) +} + +module.exports = { + bundlesize: { maxSize: '250kB' }, + hooks: { + pre: before, + post: after + }, + webpack: { + node: { + // this is needed until bcrypto stops using node buffers in browser code + Buffer: true + } + } +} diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..0ed9479 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,4 @@ +* +!src +!README.md +!package.json \ No newline at end of file diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000..820e706 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,62 @@ +name: ci +on: + push: + 
branches: + - master + pull_request: + branches: + - '**' + +jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: yarn + - run: yarn lint + - uses: gozala/typescript-error-reporter-action@v1.0.8 + - run: yarn build + - run: yarn aegir dep-check + - uses: ipfs/aegir/actions/bundle-size@master + name: size + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + test-node: + needs: check + runs-on: ${{ matrix.os }} + services: + mysql: + image: mysql:5.7 + env: + MYSQL_ROOT_PASSWORD: test-secret-pw + MYSQL_DATABASE: libp2p_rendezvous_db + ports: + - 3306:3306 + options: --health-cmd="mysqladmin ping" --health-interval=10s --health-timeout=5s --health-retries=3 + strategy: + matrix: + os: [ubuntu-latest] + node: [12, 14] + fail-fast: true + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v1 + with: + node-version: ${{ matrix.node }} + - run: yarn + - run: npx nyc --reporter=lcov aegir test -t node -- --bail + - uses: codecov/codecov-action@v1 + test-chrome: + needs: check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: yarn + - run: npx aegir test -t browser -t webworker --bail + test-firefox: + needs: check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: yarn + - run: npx aegir test -t browser -t webworker --bail -- --browsers FirefoxHeadless diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 74f58e8..0000000 --- a/.travis.yml +++ /dev/null @@ -1,31 +0,0 @@ -sudo: false -language: node_js - -matrix: - include: - - node_js: 6 - env: CXX=g++-4.8 - - node_js: 8 - env: CXX=g++-4.8 - # - node_js: stable - # env: CXX=g++-4.8 - -script: - - npm run lint - - npm run test - - npm run coverage - -before_script: - - export DISPLAY=:99.0 - - sh -e /etc/init.d/xvfb start - -after_success: - - npm run coverage-publish - -addons: - firefox: 'latest' - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - g++-4.8 diff --git a/Dockerfile b/Dockerfile new file 
mode 100644 index 0000000..63f3bad --- /dev/null +++ b/Dockerfile @@ -0,0 +1,28 @@ +FROM node:lts-alpine + +# Install deps +RUN apk add --update git build-base python3 + +# Get dumb-init to allow quit running interactively +RUN wget -O /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64 && chmod +x /usr/local/bin/dumb-init + +# Setup directories for the `node` user +RUN mkdir -p /home/node/app/rendezvous/node_modules && chown -R node:node /home/node/app/rendezvous + +WORKDIR /home/node/app/rendezvous + +# Install node modules +COPY package.json ./ +# Switch to the node user for installation +USER node +RUN npm install --production + +# Copy over source files under the node user +COPY --chown=node:node ./src ./src +COPY --chown=node:node ./README.md ./ + +ENV DEBUG libp2p* + +# Available overrides (defaults shown): +# Server logging can be enabled via the DEBUG environment variable +CMD [ "/usr/local/bin/dumb-init", "node", "src/bin.js"] \ No newline at end of file diff --git a/README.md b/README.md index 1443992..65d4070 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,237 @@ -# libp2p-rendezvous +# js-libp2p-rendezvous -A javascript implementation of the rendezvous protocol for libp2p +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://protocol.ai) +[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](http://libp2p.io/) +[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23libp2p) +[![](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io) +[![codecov](https://img.shields.io/codecov/c/github/libp2p/js-libp2p-rendezvous.svg?style=flat-square)](https://codecov.io/gh/libp2p/js-libp2p-rendezvous) +[![GitHub Workflow 
Status](https://img.shields.io/github/workflow/status/libp2p/js-libp2p-rendezvous/ci?label=ci&style=flat-square)](https://github.com/libp2p/js-libp2p-rendezvous/actions?query=branch%3Amaster+workflow%3Aci+) +> Javascript implementation of the rendezvous server protocol for libp2p -## Lead Maintainer +## Lead Maintainer [Vasco Santos](https://github.com/vasco-santos). -See https://github.com/libp2p/specs/pull/44 for more details +## Table of Contents + +- [Overview](#overview) +- [Usage](#usage) + - [Install](#install) + - [Testing](#testing) + - [CLI](#cli) + - [Docker Setup](#docker-setup) +- [Garbage Collector](#garbage-collector) +- [Contribute](#contribute) +- [License](#license) + +## Overview + +Libp2p rendezvous is a lightweight mechanism for generalized peer discovery. It can be used for bootstrap purposes, real time peer discovery, application specific routing, and so on. This module is the implementation of the rendezvous server protocol for libp2p. + +See the [SPEC](https://github.com/libp2p/specs/tree/master/rendezvous) for more details. + +## Usage + +### Install + +```bash +> npm install --global libp2p-rendezvous +``` + +Now you can use the cli command `libp2p-rendezvous-server` to spawn a libp2p rendezvous server. Bear in mind that a MySQL database is required to run the rendezvous server. You can also use this module as a library and implement your own datastore to use a different database. A datastore `interface` is provided in this repository. + +### Testing + +For running the tests in this module, you will need to have Docker installed. A docker container is used to run a MySQL database for testing purposes. + +### CLI + +After installing the rendezvous server, you can use its binary. 
It accepts several arguments: `--datastoreHost`, `--datastoreUser`, `--datastorePassword`, `--datastoreDatabase`, `--enableMemoryDatabase`, `--peerId`, `--listenMultiaddrs`, `--announceMultiaddrs`, `--metricsPort` and `--disableMetrics` + +```sh +libp2p-rendezvous-server [--datastoreHost ] [--datastoreUser ] [datastorePassword ] [datastoreDatabase ] [--enableMemoryDatabase] [--peerId ] [--listenMultiaddrs ... ] [--announceMultiaddrs ... ] [--metricsPort ] [--disableMetrics] +``` + +For further customization (e.g. swapping the muxer, using other transports, use other database) it is recommended to create a server via the API. + +#### Datastore + +A rendezvous server needs to leverage a MySQL database as a datastore for the registrations. This needs to be configured in order to run a rendezvous server. You can rely on docker to run a MySQL database using a command like: + +```sh +docker run -p 3306:3306 -e MYSQL_ROOT_PASSWORD=your-secret-pw -e MYSQL_DATABASE=libp2p_rendezvous_db -d mysql:8 --default-authentication-plugin=mysql_native_password +``` + +Once a MySQL database is running, you can run the rendezvous server by providing the datastore configuration options as follows: + +```sh +libp2p-rendezvous-server --datastoreHost 'localhost' --datastoreUser 'root' --datastorePassword 'your-secret-pw' --datastoreDatabase 'libp2p_rendezvous_db' +``` + +⚠️ For testing purposes you can skip using MySQL and use a memory datastore. **This must not be used in production!**. For this you just need to provide the `--enableMemoryDatabase` option. + +#### PeerId + +You can create a [PeerId](https://github.com/libp2p/js-peer-id) via its [CLI](https://github.com/libp2p/js-peer-id#cli) and use it in the rendezvous server. 
+ +Once you have a generated PeerId json file, you can start the rendezvous with that PeerId by specifying its path via the `--peerId` flag: + +```sh +peer-id --type=ed25519 > id.json +libp2p-rendezvous-server --peerId id.json --datastoreHost 'localhost' --datastoreUser 'root' --datastorePassword 'your-secret-pw' --datastoreDatabase 'libp2p_rendezvous_db' +``` + +#### Multiaddrs + +You can specify the libp2p rendezvous server listen and announce multiaddrs. This server is configured with [libp2p-tcp](https://github.com/libp2p/js-libp2p-tcp) and [libp2p-websockets](https://github.com/libp2p/js-libp2p-websockets) and addresses with this transports should be used. It can always be modified via the API. + +```sh +libp2p-rendezvous-server --peerId id.json --listenMultiaddrs '/ip4/127.0.0.1/tcp/15002/ws' '/ip4/127.0.0.1/tcp/8000' --announceMultiaddrs '/dns4/test.io/tcp/443/wss/p2p/12D3KooWAuEpJKhCAfNcHycKcZCv9Qy69utLAJ3MobjKpsoKbrGA' '/dns6/test.io/tcp/443/wss/p2p/12D3KooWAuEpJKhCAfNcHycKcZCv9Qy69utLAJ3MobjKpsoKbrGA' --datastoreHost 'localhost' --datastoreUser 'root' --datastorePassword 'your-secret-pw' --datastoreDatabase 'libp2p_rendezvous_db' +``` + +By default it listens on `/ip4/127.0.0.1/tcp/8000` and `/ip4/127.0.0.1/tcp/15003/ws`. It has no announce multiaddrs specified. + +#### Metrics + +Metrics are enabled by default on `/ip4/127.0.0.1/tcp/8003` via Prometheus. 
This port can also be modified with: + +```sh +libp2p-rendezvous-server --metricsPort '8008' +``` + +Moreover, metrics can also be disabled with: + +```sh +libp2p-rendezvous-server --disableMetrics +``` + +### Docker Setup + +When running the rendezvous server in Docker, you can configure the same parameters via environment variables, as follows: + +```sh +PEER_ID='/etc/opt/rendezvous/id.json' +LISTEN_MULTIADDRS='/ip4/127.0.0.1/tcp/15002/ws,/ip4/127.0.0.1/tcp/8001' +ANNOUNCE_MULTIADDRS='/dns4/test.io/tcp/443/wss,/dns6/test.io/tcp/443/wss' +DATASTORE_HOST='localhost' +DATASTORE_USER='root' +DATASTORE_PASSWORD='your-secret-pw' +DATASTORE_DATABASE='libp2p_rendezvous_db' +``` + +Please note that you should expose the listening ports with the docker run command. The default ports used are `8003` for the metrics, `8000` for the tcp listener and `150003` for the websockets listener. + +Example: + +```sh +peer-id --type=ed25519 > id.json +docker build . -t libp2p-rendezvous +docker run -p 8003:8003 -p 15002:15002 -p 8000:8000 \ +-e LISTEN_MULTIADDRS='/ip4/127.0.0.1/tcp/8000,/ip4/127.0.0.1/tcp/15002/ws' \ +-e ANNOUNCE_MULTIADDRS='/dns4/localhost/tcp/8000,/dns4/localhost/tcp/15002/ws' \ +-e DATASTORE_USER='root' \ +-e DATASTORE_PASSWORD='your-secret-pw' \ +-e DATASTORE_DATABASE='libp2p_rendezvous_db' \ +-e PEER_ID='/etc/opt/rendezvous/id.json' \ +-v $PWD/id.json:/etc/opt/rendezvous/id.json \ +-d libp2p-rendezvous +``` + +### Docker compose setup with mysql + +Here follows an example on how you can setup a rendezvous server with a mysql database. 
+ +```yml +version: '3.2' +services: + db: + image: mysql:8 + volumes: + - mysql-db:/var/lib/mysql + command: --default-authentication-plugin=mysql_native_password + restart: always + environment: + - MYSQL_ROOT_PASSWORD=my-secret-pw + - MYSQL_DATABASE=libp2p_rendezvous_db + ports: + - "3306:3306" + healthcheck: + test: ["CMD-SHELL", 'mysqladmin ping'] + interval: 10s + timeout: 2s + retries: 10 + server: + image: libp2p/js-libp2p-rendezvous + volumes: + - ./id.json:/etc/opt/rendezvous/id.json + ports: + - "8000:8000" + - "8003:8003" + - "15003:15003" + restart: always + environment: + - DATASTORE_PASSWORD=my-secret-pw + - DATASTORE_DATABASE=libp2p_rendezvous_db + - DATASTORE_HOST=db + depends_on: + db: + condition: service_healthy +volumes: + mysql-db: +``` + +### Library + +The rendezvous server can be used as a library, in order to spawn your custom server. This is useful if you want to customize libp2p's transports or use a different database as a datastore. + +```js +const RendezvousServer = require('libp2p-rendezvous') + +const server = new RendezvousServer({ + libp2pOptions, + rendezvousServerOptions +}) +``` + +`libp2pOptions` contains the libp2p [node options](https://github.com/libp2p/js-libp2p/blob/master/doc/API.md#create) to create a libp2p node. + +#### rendezvousServerOptions + +The `rendezvousServerOptions` customizes the rendezvous server. Only the `datastore` is required. 
+ +| Name | Type | Description | +|------|------|-------------| +| datastore | `object` | [datastore implementation](./src/server/datastores/README.md) | +| [minTtl] | `number` | minimum acceptable ttl to store a registration | +| [maxTtl] | `number` | maximum acceptable ttl to store a registration | +| [maxNsLength] | `number` | maximum acceptable namespace length | +| [maxDiscoveryLimit] | `number` | maximum acceptable discover limit | +| [maxPeerRegistrations] | `number` | maximum acceptable registrations per peer | +| [gcBootDelay] | `number` | delay before starting garbage collector job | +| [gcMinInterval] | `number` | minimum interval between each garbage collector job, in case maximum threshold reached | +| [gcInterval] | `number` | interval between each garbage collector job | +| [gcMinRegistrations] | `number` | minimum number of registration for triggering garbage collector | +| [gcMaxRegistrations] | `number` | maximum number of registration for triggering garbage collector | + +## Garbage Collector + +The rendezvous server has a built in garbage collector (GC) that removes persisted data over time, as it is expired. + +The GC job has two different triggers. It will run over time according to the configurable `gcBootDelay` and `gcInterval` options, and it will run if it reaches a configurable `gcMaxRegistrations` threshold. + +Taking into account the GC performance, two other factors are considered before the GC interacts with the Datastore. If a configurable number of minimum registrations `gcMinRegistrations` are not stored, the GC job will not act in this GC cycle. Moreover, to avoid multiple attempts of GC when the max threshold is reached, but no records are yet expired, a minimum interval between each job can also be configured with `gcMinInterval`. + +## Contribute + +Feel free to join in. All welcome. Open an [issue](https://github.com/libp2p/js-libp2p-rendezvous/issues)! 
+ +This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/contributing.md) + +## License + +MIT - Protocol Labs 2020 + +[multiaddr]: https://github.com/multiformats/js-multiaddr diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 58aef65..0000000 --- a/appveyor.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: "{build}" - -environment: - matrix: - - nodejs_version: "6" - - nodejs_version: "8" - -matrix: - fast_finish: true - -install: - # Install Node.js - - ps: Install-Product node $env:nodejs_version - - # Upgrade npm - - npm install -g npm - - # Output our current versions for debugging - - node --version - - npm --version - - # Install our package dependencies - - npm install - -test_script: - - npm run test:node - -build: off diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 0000000..3b4b32a --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1,209 @@ +# Rendezvous benchmarks + +This benchmark contains a simulator to stress test a rendezvous server and gather performance metrics from it. + +## Running + +For running the benchmarks, it is required to install the dependencies of the `libp2p-rendezvous`, as well as Docker. With those installed, you only need to run the `index.js` file as follows: + +```sh +$ npm install +$ cd benchmarks +$ node index.js +``` + +While default values exist for benchmarking, you can use CLI parameters to configure how to run the benchmark. + +It is worth mentioning that this benchmark runner will be stressing a rendezvous server running in a separate process. It will run the configured number of libp2p client nodes in parallel, including their Rendezvous operations. 
As a result, a massive number of clients might degrade the overall performance of the clients as they will all be running in the same machine and process. + +Network Latency is not considered in this benchmark. The benchmark focus on sending rendezvous requests over the wire through local connections. + +### Configuration + +```sh +// Usage: $0 [--nClients ] [--nNamespaces ] [--initialRegistrations ] +// [--benchmarkRuns ] [--benchmarkType ] [--outputFile ] +// [--discoverLimit ] [--discoverInexistentNamespaces] +``` + +### Metrics + +The metrics that can be obtained from this benchmark setup are the following: + +- Operations {Register, Discover} + - Average response time + - Maximum response time + - Median response time +- Server performance + - CPU + - Memory + +The Response Times (RT) metrics are measured in milliseconds while the Memory (Mem) metrics are measured in MB. CPU usage is a % value. + +## Created Performance testing scenarios + +There are a few considerations that we need to have before observing the results: + +- Massive number of clients might degrade the overall performance of the clients as they will all be running in the same machine and process. + - Response times will be influenced by Node's event loop as a large number of asynchronous operations will happen on the client side. +- Number of connections open will influence the overall memory consumption, specially with a large number of parallel operations + - In a real world scenario, connections will be open and closed per Rendezvous operation, while this benchmark kept them open for faster results. + +To ease performance evaluation on this repo, a benchmark shell script was created for running several combinations of inputs in the benchmark, according to the tables below. + +### Register + +Measure adding n registrations. 
Each operation in the following table + +| Type | Clients | Io registrations | Operations | Namespaces | +|------|---------|------------------|------------|------------| +| `Register` | 5 | 0 | 500 | 10 | +| `Register` | 5 | 1000 | 500 | 10 | +| `Register` | 10 | 1000 | 500 | 10 | +| `Register` | 100 | 1000 | 500 | 10 | +| `Register` | 100 | 1000 | 1000 | 10 | +| `Register` | 100 | 10000 | 500 | 10 | +| `Register` | 100 | 10000 | 1000 | 10 | +| `Register` | 50 | 100000 | 500 | 10 | +| `Register` | 50 | 100000 | 1000 | 10 | +| `Register` | 100 | 100000 | 500 | 10 | +| `Register` | 100 | 100000 | 1000 | 10 | +| `Register` | 200 | 100000 | 500 | 10 | +| `Register` | 200 | 100000 | 1000 | 10 | +| `Register` | 200 | 200000 | 1000 | 10 | + +### Discover + +1. Measure discover existing registrations in series with limit of 20 + +| Type | Clients | Io registrations | Operations | Namespaces | +|------|---------|------------------|------------|------------| +| `Discover` | 5 | 1000 | 500 | 10 | +| `Discover` | 5 | 1000 | 500 | 100 | +| `Discover` | 10 | 10000 | 500 | 10 | +| `Discover` | 10 | 10000 | 500 | 100 | +| `Discover` | 10 | 10000 | 1000 | 10 | +| `Discover` | 10 | 10000 | 1000 | 100 | +| `Discover` | 100 | 100000 | 500 | 10 | +| `Discover` | 100 | 100000 | 500 | 100 | + +2. Measure discover existing registrations in series with limit of 100 + +| Type | Clients | Io registrations | Operations | Namespaces | +|------|---------|------------------|------------|------------| +| `Discover` | 5 | 1000 | 500 | 10 | +| `Discover` | 5 | 1000 | 500 | 100 | +| `Discover` | 10 | 10000 | 500 | 10 | +| `Discover` | 10 | 10000 | 500 | 100 | +| `Discover` | 10 | 10000 | 1000 | 10 | +| `Discover` | 10 | 10000 | 1000 | 100 | +| `Discover` | 100 | 100000 | 500 | 10 | +| `Discover` | 100 | 100000 | 500 | 100 | + +3. Measure trying to discover peers on inexistent namespaces. 
+ +| Type | Clients | Io registrations | Operations | Namespaces | +|------|---------|------------------|------------|------------| +| `Discover` | 5 | 0 | 500 | 10 | +| `Discover` | 5 | 0 | 1000 | 10 | +| `Discover` | 10 | 0 | 1000 | 10 | +| `Discover` | 10 | 0 | 1000 | 100 | +| `Discover` | 100 | 0 | 10000 | 10 | +| `Discover` | 100 | 0 | 10000 | 100 | +| `Discover` | 10 | 10000 | 10000 | 100 | +| `Discover` | 100 | 10000 | 10000 | 100 | +| `Discover` | 10 | 100000 | 10000 | 100 | +| `Discover` | 100 | 100000 | 10000 | 100 | + +### Results obtained + +Running in a Macbook with 2.6 GHz 6-Core Intel Core i7 and 16 GB 2400 MHz DDR4. + +The Response Times (RT) metrics are measured in milliseconds while the Memory (Mem) metrics are measured in MB. CPU usage is a % value. + +**Register** + +| Type | Clients | Io Reg | Namespaces | Ops | Avg RT | Median RT | Max RT | Avg CPU | Median CPU | Max CPU | Avg Mem | Median Mem | Max Mem | +|----------|---------|--------|------------|-----|--------|-----------|--------|---------|------------|---------|---------|------------|---------| +| REGISTER | 5 | 100 | 10 | 500 | 16 | 15 | 26 | 31 | 42 | 61 | 98 | 98 | 106 | +| REGISTER | 5 | 1000 | 10 | 500 | 14 | 14 | 26 | 34 | 37 | 67 | 113 | 112 | 114 | +| REGISTER | 10 | 1000 | 10 | 500 | 23 | 22 | 55 | 34 | 23 | 60 | 143 | 141 | 149 | +| REGISTER | 100 | 1000 | 10 | 500 | 221 | 224 | 308 | 23 | 22 | 48 | 133 | 132 | 136 | +| REGISTER | 100 | 1000 | 10 | 1000 | 276 | 277 | 410 | 26 | 41 | 54 | 138 | 134 | 165 | +| REGISTER | 100 | 10000 | 10 | 500 | 1900 | 282 | 8468 | 15 | 18 | 75 | 364 | 362 | 382 | +| REGISTER | 100 | 10000 | 10 | 1000 | 1061 | 292 | 8017 | 15 | 19 | 76 | 393 | 392 | 397 | +| REGISTER | 50 | 100000 | 10 | 500 | 23686 | 358 | 57365 | 11 | 8 | 88 | 2341 | 2334 | 2645 | +| REGISTER | 50 | 100000 | 10 | 1000 | 10055 | 425 | 56977 | 10 | 0 | 89 | 2501 | 2543 | 2887 | +| REGISTER | 100 | 100000 | 10 | 500 | 45273 | 674 | 55370 | 11 | 6 | 95 | 2691 | 2718 | 2787 | +| 
REGISTER | 100 | 100000 | 10 | 1000 | 22849 | 870 | 55060 | 11 | 11 | 88 | 2166 | 2225 | 2522 | +| REGISTER | 200 | 100000 | 10 | 500 | 24572 | 2456 | 108989 | 10 | 10 | 90 | 2468 | 2476 | 2655 | +| REGISTER | 200 | 100000 | 10 | 1000 | 61448 | 2069 | 299530 | 10 | 3 | 87 | 2515 | 2545 | 2742 | +| REGISTER | 200 | 200000 | 10 | 1000 | 168163 | 2485 | 830998 | 12 | 3 | 93 | 2814 | 2896 | 3286 | + +The median Response Time keeps a value below 1000 milliseconds when the server is interacting in parallel with 100 (or less) clients. It increases with the increase of clients interacting in parallel. These results are also affected by running 200 clients in the same process/machine and would probably be better when running in different machines as the Event Loop would be considerably more available to each client. + +As expected, with the increase of clients connected to a single server doing multiple Register operations in parallel the memory consumption increase. Regarding CPU usage, except for some spikes over time, the average and median usage is low. 
+ +**Discover with limit of 20** + +| Type | Clients | Io Reg | Namespaces | Ops | Avg RT | Median RT | Max RT | Avg CPU | Median CPU | Max CPU | Avg Mem | Median Mem | Max Mem | +|----------|---------|--------|------------|-----|--------|-----------|--------|---------|------------|---------|---------|------------|---------| +| DISCOVER | 5 | 1000 | 10 | 500 | 4 | 4 | 12 | 24 | 34 | 59 | 115 | 115 | 116 | +| DISCOVER | 5 | 1000 | 100 | 500 | 5 | 5 | 17 | 29 | 32 | 66 | 114 | 113 | 115 | +| DISCOVER | 10 | 10000 | 10 | 500 | 144 | 7 | 5129 | 17 | 20 | 67 | 332 | 330 | 359 | +| DISCOVER | 10 | 10000 | 100 | 500 | 177 | 7 | 6505 | 17 | 9 | 88 | 367 | 369 | 370 | +| DISCOVER | 10 | 10000 | 10 | 1000 | 80 | 7 | 5721 | 15 | 9 | 86 | 330 | 330 | 331 | +| DISCOVER | 10 | 10000 | 100 | 1000 | 94 | 7 | 6437 | 16 | 18 | 84 | 354 | 353 | 397 | +| DISCOVER | 100 | 100000 | 10 | 500 | 26379 | 118 | 112840 | 10 | 0 | 92 | 2009 | 2045 | 2204 | +| DISCOVER | 100 | 100000 | 100 | 500 | 30609 | 139 | 123132 | 11 | 8 | 93 | 2229 | 2276 | 2416 | + +Like in the Register Response Times, the median response Times are fairly low. But, as more and more requests accumulate and the benchmark process Event Loop cannot handle efficiently all the client responses. In addition, memory and CPU usage also increased as more and more clients. 
+ +**Discover with limit of 100** + +| Type | Clients | Io Reg | Namespaces | Ops | Avg RT | Median RT | Max RT | Avg CPU | Median CPU | Max CPU | Avg Mem | Median Mem | Max Mem | +|----------|---------|--------|------------|-----|--------|-----------|--------|---------|------------|---------|---------|------------|---------| +| DISCOVER | 5 | 1000 | 10 | 500 | 4 | 4 | 16 | 34 | 0 | 103 | 110 | 111 | 111 | +| DISCOVER | 5 | 1000 | 100 | 500 | 5 | 5 | 16 | 29 | 0 | 89 | 111 | 113 | 113 | +| DISCOVER | 10 | 10000 | 10 | 500 | 166 | 9 | 6168 | 16 | 20 | 94 | 322 | 319 | 346 | +| DISCOVER | 10 | 10000 | 100 | 500 | 192 | 9 | 6658 | 18 | 18 | 93 | 352 | 352 | 353 | +| DISCOVER | 10 | 10000 | 10 | 1000 | 80 | 9 | 5274 | 18 | 19 | 97 | 326 | 325 | 362 | +| DISCOVER | 10 | 10000 | 100 | 1000 | 102 | 9 | 6701 | 15 | 18 | 98 | 320 | 315 | 346 | +| DISCOVER | 100 | 100000 | 10 | 500 | 29002 | 118 | 119308 | 10 | 0 | 149 | 2062 | 2021 | 2292 | +| DISCOVER | 100 | 100000 | 100 | 500 | 30290 | 114 | 127063 | 10 | 5 | 154 | 1995 | 2037 | 2120 | + +The difference in results between the default interval of 20 and bigger interval of 100 was not significant in any of the evaluated metrics. 
+ +**Discover inexistent namespaces** + +| Type | Clients | Io Reg | Namespaces | Ops | Avg RT | Median RT | Max RT | Avg CPU | Median CPU | Max CPU | Avg Mem | Median Mem | Max Mem | +|----------|---------|--------|------------|-----|--------|-----------|--------|---------|------------|---------|---------|------------|---------| +| DISCOVER | 5 | 0 | 10 | 500 | 16 | 15 | 56 | 24 | 39 | 42 | 96 | 96 | 103 | +| DISCOVER | 5 | 0 | 10 | 1000 | 17 | 16 | 107 | 20 | 26 | 44 | 102 | 103 | 110 | +| DISCOVER | 10 | 0 | 5 | 1000 | 19 | 18 | 88 | 65 | 65 | 65 | 90 | 90 | 90 | +| DISCOVER | 10 | 0 | 100 | 1000 | 20 | 19 | 61 | 23 | 21 | 56 | 101 | 101 | 109 | +| DISCOVER | 100 | 0 | 10 | 10000 | 241 | 228 | 1012 | 22 | 15 | 79 | 282 | 294 | 299 | +| DISCOVER | 100 | 0 | 100 | 10000 | 176 | 172 | 569 | 22 | 3 | 68 | 261 | 270 | 283 | +| DISCOVER | 10 | 10000 | 100 | 10000 | 37 | 26 | 7795 | 16 | 8 | 103 | 277 | 203 | 457 | +| DISCOVER | 100 | 10000 | 100 | 10000 | 348 | 266 | 7664 | 21 | 13 | 73 | 301 | 261 | 428 | +| DISCOVER | 10 | 100000 | 100 | 10000 | 291 | 26 | 162046 | 12 | 5 | 153 | 1956 | 1935 | 2191 | +| DISCOVER | 100 | 100000 | 100 | 10000 | 1779 | 291 | 178066 | 12 | 4 | 150 | 2392 | 2618 | 2828 | + +There were no different conclusions from observing these results compared to the previous ones. + +**Final Remarks** + +Generally, libp2p nodes who register namespaces aim to be found for providing a specific service to boost the network. This way, they will probably not be much interested in discovering other peers. On the other side, peers who will discover peers providing a given service will likely not provide themselves any service. Moreover, they will not be continuously trying to discover peers. This is not a rule, but a general expectation to consider the results obtained. For instance, this can be used to discover peers sharing a pubsub topics to improve a given topology. 
+ +Taking into account the above consideration, let's consider a network of 1 Million libp2p nodes interacting with a rendezvous server where the average requests per 24 Hours are 8 requests per peer. This means around: + +- 8M requests per day +- 333.333k requests per hour +- 5.555k requests per minute +- 92 requests per second + +In this context, around 92 connections would be established per second to create a rendezvous request, wait for it to be processed and receive a response. This will be bigger if the network gets bigger, or if peers do more requests than the mentioned average. In addition, DoS attacks might also happen. + +Comparing these numbers with the results obtained, we can see that for around 100 connections simultaneously interacting with the rendezvous server, the median Round Trip times, memory consumption and CPU usage are within acceptable intervals. However, as requests accumulate it is also likely that some requests will take longer to process, even with a client per machine state. It is important highlighting that running 100+ clients in a single machine & process will also be a bottleneck and results should be better in "real" environment. + +The ideal scenario for a deployment of a Rendezvous Server will be to have clusters of rendezvous servers backed by a federated DB. This would guarantee that the server keeps healthy and with good response times. For the above example with 1M nodes, we should probably have a cluster with 3 rendezvous servers, which would receive an average of 30 requests per second. The average does not mean that in certain times they can get to 50 requests per second, or even more. 
diff --git a/benchmarks/id.json b/benchmarks/id.json new file mode 100644 index 0000000..1373524 --- /dev/null +++ b/benchmarks/id.json @@ -0,0 +1,5 @@ +{ + "id": "12D3KooWPCDhKA2NoJCnxUqKmBNLVg6ekNzcw9GVsncLgfdbN2pm", + "privKey": "CAESYDprk82zAJeNcIHxgj3seEWRLCOh+7e7yTBVx1IW38HnxsEgoQGbW5xJDd7GHml2Mb8LNxsdB+WgznkhDLYZL97GwSChAZtbnEkN3sYeaXYxvws3Gx0H5aDOeSEMthkv3g==", + "pubKey": "CAESIMbBIKEBm1ucSQ3exh5pdjG/CzcbHQfloM55IQy2GS/e" +} diff --git a/benchmarks/index.js b/benchmarks/index.js new file mode 100644 index 0000000..20f2a59 --- /dev/null +++ b/benchmarks/index.js @@ -0,0 +1,294 @@ +'use strict' + +/* eslint-disable no-console */ + +const fs = require('fs') +const execa = require('execa') +const argv = require('minimist')(process.argv.slice(2)) +const microtime = require('microtime') +const path = require('path') +const pidusage = require('pidusage') +const pDefer = require('p-defer') +const delay = require('delay') +const uint8ArrayToString = require('uint8arrays/to-string') + +const { pipe } = require('it-pipe') +const lp = require('it-length-prefixed') +const { + collect, + tap +} = require('streaming-iterables') +const { toBuffer } = require('it-buffer') + +const Libp2p = require('libp2p') +const PeerId = require('peer-id') + +const docker = require('../mysql-local/docker') +const ServerPeerId = require('./id.json') +const { median } = require('./utils') + +const { + PROTOCOL_MULTICODEC +} = require('../src/constants') +const { Message } = require('../src/proto') +const MESSAGE_TYPE = Message.MessageType + +const { defaultLibp2pConfig } = require('../test/utils') + +/** + * Setup Rendezvous server process and multiple clients with a connection with the server. + * Outputs metrics reports on teardown. + * + * @param {number} nClients + * @returns {{ connections: Connection[], clients: Libp2p[], teardown: () => void}} + */ +const setupRendezvous = async (nClients) => { + // Setup datastore + console.log('1. 
Datastore setup') + const containerId = await docker.start() + + // Setup Server + console.log('2. Rendezvous Server setup') + const serverDefer = pDefer() + // Increase max peer registrations to avoid DoS protection + const serverProcess = execa('node', [path.join(__dirname, '../src/bin.js'), '--peerId', './id.json', '--maxPeerRegistrations', '1000000'], { + cwd: path.resolve(__dirname), + all: true + }) + serverProcess.all.on('data', (data) => { + process.stdout.write(data) + const output = uint8ArrayToString(data) + + if (output.includes('Rendezvous server listening on:')) { + serverDefer.resolve() + } + }) + + const serverProcessId = serverProcess.pid + + await serverDefer.promise + + const serverPeerId = await PeerId.createFromJSON(ServerPeerId) + const serverMultiaddr = `/ip4/127.0.0.1/tcp/15003/ws/p2p/${serverPeerId.toB58String()}` + + const clients = [] + const connections = [] + + for (let i = 0; i < nClients; i++) { + console.log(`3. Rendezvous Client ${i} setup`) + + // Setup Client + const client = await Libp2p.create({ + ...defaultLibp2pConfig, + addresses: { + listen: ['/ip4/127.0.0.1/tcp/0/ws'] + } + }) + + await client.start() + const connection = await client.dial(serverMultiaddr) + + clients.push(client) + connections.push(connection) + } + + return { + clients, + connections, + serverProcessId, + teardown: async () => { + serverProcess.kill() + + await Promise.all([ + serverProcess, + clients.map((client) => client.stop()) + ]) + + docker.stop(containerId) + } + } +} + +// Populate registration in series from each client +const populateRegistrations = async (clients, connections, nRes, nNamespaces) => { + for (let i = 0; i < nRes; i++) { + const pIndex = i % clients.length + + const signedPeerRecord = clients[pIndex].peerStore.addressBook.getRawEnvelope(clients[pIndex].peerId) + const source = [Message.encode({ + type: MESSAGE_TYPE.REGISTER, + register: { + signedPeerRecord, + ns: `${i % nNamespaces}`, + ttl: 10000 + } + })] + + const { stream } 
= await connections[pIndex].newStream(PROTOCOL_MULTICODEC) + await pipe( + source, + lp.encode(), + stream, + lp.decode() + ) + } +} + +const createDiscoverMessages = (nRuns, nClients, nNamespaces, limit = 20, discoverInexistentNamespaces = false) => { + return Array.from({ length: nRuns / nClients }, (_, runIndex) => Message.encode({ + type: MESSAGE_TYPE.DISCOVER, + discover: { + ns: discoverInexistentNamespaces ? `${runIndex % nNamespaces}` : `invalid${runIndex % nNamespaces}`, + limit + } + })) +} + +const createRegisterMessages = (nRuns, nClients, nNamespaces, signedPeerRecord) => { + return Array.from({ length: nRuns / nClients }, (_, runIndex) => Message.encode({ + type: MESSAGE_TYPE.REGISTER, + register: { + signedPeerRecord, + ns: `${runIndex % nNamespaces}`, + ttl: 10000 + } + })) +} + +const sendParallelMessages = async (clients, connections, nRuns, nNamespaces, type = 'REGISTER', discoverLimit, discoverInexistentNamespaces) => { + let countErrors = 0 + const responseTimes = [] + + await Promise.all(Array.from({ length: clients.length }, async (_, i) => { + const responses = [] + + let source + if (type === 'DISCOVER') { + source = createDiscoverMessages(nRuns, clients.length, nNamespaces, discoverLimit, discoverInexistentNamespaces) + } else { + const signedPeerRecord = clients[i].peerStore.addressBook.getRawEnvelope(clients[i].peerId) + source = createRegisterMessages(nRuns, clients.length, nNamespaces, signedPeerRecord) + } + + for (let mIndex = 0; mIndex < source.length; mIndex++) { + const { stream } = await connections[i].newStream(PROTOCOL_MULTICODEC) + + let start, end + const r = await pipe( + [source[mIndex]], + lp.encode(), + tap(() => { + start = microtime.now() + }), + stream, + tap(() => { + end = microtime.now() + }), + lp.decode(), + toBuffer, + collect + ) + responseTimes.push(end - start) + responses.push(r[0]) + } + + responses.forEach((r) => { + const m = Message.decode(r) + if ((m.registerResponse && m.registerResponse.status !== 0) 
|| (m.discoverResponse && m.discoverResponse.status !== 0)) { + countErrors++ + } + }) + })) + + return { + responseTimes, + countErrors + } +} + +const computePidUsage = (pid) => { + const measuremennts = [] + + const _intervalId = setInterval(() => { + pidusage(pid, (_, { cpu, memory }) => { + measuremennts.push({ cpu, memory }) + }) + }, 500) + + return { + measuremennts, + teardown: () => clearInterval(_intervalId) + } +} + +const tableHeader = `| Type | Clients | Io Reg | Namespaces | Ops | Avg RT | Median RT | Max RT | Avg CPU | Median CPU | Max CPU | Avg Mem | Median Mem | Max Mem | +|----------|---------|--------|------------|-----|--------|-----------|--------|---------|------------|---------|---------|------------|---------|` + +// Usage: $0 [--nClients ] [--nNamespaces ] [--initialRegistrations ] +// [--benchmarkRuns ] [--benchmarkType ] [--outputFile ] +// [--discoverLimit ] [--discoverInexistentNamespaces] +const main = async () => { + const nClients = argv.nClients || 4 + const nNamespaces = argv.nNamespaces || 5 + + const initalRegistrations = argv.initialRegistrations || 100 + const benchmarkRuns = argv.benchmarkRuns || 500 + const benchmarkType = argv.benchmarkType === 'DISCOVER' ? 'DISCOVER' : 'REGISTER' + + const outputPath = argv.outputFile || './output.md' + + const discoverLimit = argv.discoverLimit ? 
Number(argv.discoverLimit) : 20 + const discoverInexistentNamespaces = argv.discoverInexistentNamespaces + + // Setup datastore, server and clients + console.log('==========--- Setup ---==========') + const { clients, connections, serverProcessId, teardown } = await setupRendezvous(nClients) + + // Populate Initial State and prepare data in memory + console.log('==========--- Initial State Population ---==========') + await populateRegistrations(clients, connections, initalRegistrations, nNamespaces) + + console.log('==========--- Start Benchmark ---==========') + const { measuremennts, teardown: pidUsageTeatdown } = computePidUsage(serverProcessId) + await delay(1000) + const { responseTimes, countErrors } = await sendParallelMessages(clients, connections, benchmarkRuns, nNamespaces, benchmarkType, discoverLimit, discoverInexistentNamespaces) + + pidUsageTeatdown() + console.log('==========--- Finished! Compute Metrics ---==========') + + console.log('operations errored', countErrors) + + const averageRT = Math.floor(responseTimes.reduce((a, b) => a + b, 0) / responseTimes.length / 1000) + const medianRT = Math.floor(median(responseTimes) / 1000) + const maxRT = Math.floor(Math.max(...responseTimes) / 1000) + + const cpuM = measuremennts.map((m) => m.cpu) + const averageCPU = Math.floor(cpuM.reduce((a, b) => a + b, 0) / cpuM.length) + const medianCPU = Math.floor(median(cpuM)) + const maxCPU = Math.floor(Math.max(...cpuM)) + + const memM = measuremennts.map((m) => m.memory) + const averageMem = Math.floor(memM.reduce((a, b) => a + b, 0) * Math.pow(10, -6) / measuremennts.length) + const medianMem = Math.floor(median(memM) * Math.pow(10, -6)) + const maxMem = Math.floor(Math.max(...memM) * Math.pow(10, -6)) + + const resultString = `| ${benchmarkType} | ${nClients} | ${initalRegistrations} | ${nNamespaces} | ${benchmarkRuns} | ${averageRT} | ${medianRT} | ${maxRT} | ${averageCPU} | ${medianCPU} | ${maxCPU} | ${averageMem} | ${medianMem} | ${maxMem} |` + + 
console.log(tableHeader) + console.log(resultString) + + await delay(4000) + await teardown() + + try { + if (fs.existsSync(outputPath)) { + fs.appendFileSync(outputPath, `\n${resultString}`) + } else { + fs.appendFileSync(outputPath, `${tableHeader}\n${resultString}`) + } + } catch (err) { + console.error(err) + } +} + +main() diff --git a/benchmarks/output-discover-inexistent.md b/benchmarks/output-discover-inexistent.md new file mode 100644 index 0000000..e9dfdce --- /dev/null +++ b/benchmarks/output-discover-inexistent.md @@ -0,0 +1,12 @@ +| Type | Clients | Io Reg | Namespaces | Ops | Avg RT | Median RT | Max RT | Avg CPU | Median CPU | Max CPU | Avg Mem | Median Mem | Max Mem | +|----------|---------|--------|------------|-----|--------|-----------|--------|---------|------------|---------|---------|------------|---------| +| DISCOVER | 5 | 0 | 10 | 500 | 16 | 15 | 56 | 24 | 39 | 42 | 96 | 96 | 103 | +| DISCOVER | 5 | 0 | 10 | 1000 | 17 | 16 | 107 | 20 | 26 | 44 | 102 | 103 | 110 | +| DISCOVER | 10 | 0 | 5 | 1000 | 19 | 18 | 88 | 65 | 65 | 65 | 90 | 90 | 90 | +| DISCOVER | 10 | 0 | 100 | 1000 | 20 | 19 | 61 | 23 | 21 | 56 | 101 | 101 | 109 | +| DISCOVER | 100 | 0 | 10 | 10000 | 241 | 228 | 1012 | 22 | 15 | 79 | 282 | 294 | 299 | +| DISCOVER | 100 | 0 | 100 | 10000 | 176 | 172 | 569 | 22 | 3 | 68 | 261 | 270 | 283 | +| DISCOVER | 10 | 10000 | 100 | 10000 | 37 | 26 | 7795 | 16 | 8 | 103 | 277 | 203 | 457 | +| DISCOVER | 100 | 10000 | 100 | 10000 | 348 | 266 | 7664 | 21 | 13 | 73 | 301 | 261 | 428 | +| DISCOVER | 10 | 100000 | 100 | 10000 | 291 | 26 | 162046 | 12 | 5 | 153 | 1956 | 1935 | 2191 | +| DISCOVER | 100 | 100000 | 100 | 10000 | 1779 | 291 | 178066 | 12 | 4 | 150 | 2392 | 2618 | 2828 | \ No newline at end of file diff --git a/benchmarks/output-discover-limit-100.md b/benchmarks/output-discover-limit-100.md new file mode 100644 index 0000000..935a65b --- /dev/null +++ b/benchmarks/output-discover-limit-100.md @@ -0,0 +1,10 @@ +| Type | Clients | Io Reg | 
Namespaces | Ops | Avg RT | Median RT | Max RT | Avg CPU | Median CPU | Max CPU | Avg Mem | Median Mem | Max Mem | +|----------|---------|--------|------------|-----|--------|-----------|--------|---------|------------|---------|---------|------------|---------| +| DISCOVER | 5 | 1000 | 10 | 500 | 4 | 4 | 16 | 34 | 0 | 103 | 110 | 111 | 111 | +| DISCOVER | 5 | 1000 | 100 | 500 | 5 | 5 | 16 | 29 | 0 | 89 | 111 | 113 | 113 | +| DISCOVER | 10 | 10000 | 10 | 500 | 166 | 9 | 6168 | 16 | 20 | 94 | 322 | 319 | 346 | +| DISCOVER | 10 | 10000 | 100 | 500 | 192 | 9 | 6658 | 18 | 18 | 93 | 352 | 352 | 353 | +| DISCOVER | 10 | 10000 | 10 | 1000 | 80 | 9 | 5274 | 18 | 19 | 97 | 326 | 325 | 362 | +| DISCOVER | 10 | 10000 | 100 | 1000 | 102 | 9 | 6701 | 15 | 18 | 98 | 320 | 315 | 346 | +| DISCOVER | 100 | 100000 | 10 | 500 | 29002 | 118 | 119308 | 10 | 0 | 149 | 2062 | 2021 | 2292 | +| DISCOVER | 100 | 100000 | 100 | 500 | 30290 | 114 | 127063 | 10 | 5 | 154 | 1995 | 2037 | 2120 | diff --git a/benchmarks/output-discover-limit.md b/benchmarks/output-discover-limit.md new file mode 100644 index 0000000..6f1951a --- /dev/null +++ b/benchmarks/output-discover-limit.md @@ -0,0 +1,10 @@ +| Type | Clients | Io Reg | Namespaces | Ops | Avg RT | Median RT | Max RT | Avg CPU | Median CPU | Max CPU | Avg Mem | Median Mem | Max Mem | +|----------|---------|--------|------------|-----|--------|-----------|--------|---------|------------|---------|---------|------------|---------| +| DISCOVER | 5 | 1000 | 10 | 500 | 4 | 4 | 12 | 24 | 34 | 59 | 115 | 115 | 116 | +| DISCOVER | 5 | 1000 | 100 | 500 | 5 | 5 | 17 | 29 | 32 | 66 | 114 | 113 | 115 | +| DISCOVER | 10 | 10000 | 10 | 500 | 144 | 7 | 5129 | 17 | 20 | 67 | 332 | 330 | 359 | +| DISCOVER | 10 | 10000 | 100 | 500 | 177 | 7 | 6505 | 17 | 9 | 88 | 367 | 369 | 370 | +| DISCOVER | 10 | 10000 | 10 | 1000 | 80 | 7 | 5721 | 15 | 9 | 86 | 330 | 330 | 331 | +| DISCOVER | 10 | 10000 | 100 | 1000 | 94 | 7 | 6437 | 16 | 18 | 84 | 354 | 353 | 397 | +| 
DISCOVER | 100 | 100000 | 10 | 500 | 26379 | 118 | 112840 | 10 | 0 | 92 | 2009 | 2045 | 2204 | +| DISCOVER | 100 | 100000 | 100 | 500 | 30609 | 139 | 123132 | 11 | 8 | 93 | 2229 | 2276 | 2416 | diff --git a/benchmarks/output-register.md b/benchmarks/output-register.md new file mode 100644 index 0000000..52c877f --- /dev/null +++ b/benchmarks/output-register.md @@ -0,0 +1,16 @@ +| Type | Clients | Io Reg | Namespaces | Ops | Avg RT | Median RT | Max RT | Avg CPU | Median CPU | Max CPU | Avg Mem | Median Mem | Max Mem | +|----------|---------|--------|------------|-----|--------|-----------|--------|---------|------------|---------|---------|------------|---------| +| REGISTER | 5 | 100 | 10 | 500 | 16 | 15 | 26 | 31 | 42 | 61 | 98 | 98 | 106 | +| REGISTER | 5 | 1000 | 10 | 500 | 14 | 14 | 26 | 34 | 37 | 67 | 113 | 112 | 114 | +| REGISTER | 10 | 1000 | 10 | 500 | 23 | 22 | 55 | 34 | 23 | 60 | 143 | 141 | 149 | +| REGISTER | 100 | 1000 | 10 | 500 | 221 | 224 | 308 | 23 | 22 | 48 | 133 | 132 | 136 | +| REGISTER | 100 | 1000 | 10 | 1000 | 276 | 277 | 410 | 26 | 41 | 54 | 138 | 134 | 165 | +| REGISTER | 100 | 10000 | 10 | 500 | 1900 | 282 | 8468 | 15 | 18 | 75 | 364 | 362 | 382 | +| REGISTER | 100 | 10000 | 10 | 1000 | 1061 | 292 | 8017 | 15 | 19 | 76 | 393 | 392 | 397 | +| REGISTER | 50 | 100000 | 10 | 500 | 23686 | 358 | 57365 | 11 | 8 | 88 | 2341 | 2334 | 2645 | +| REGISTER | 50 | 100000 | 10 | 1000 | 10055 | 425 | 56977 | 10 | 0 | 89 | 2501 | 2543 | 2887 | +| REGISTER | 100 | 100000 | 10 | 500 | 45273 | 674 | 55370 | 11 | 6 | 95 | 2691 | 2718 | 2787 | +| REGISTER | 100 | 100000 | 10 | 1000 | 22849 | 870 | 55060 | 11 | 11 | 88 | 2166 | 2225 | 2522 | +| REGISTER | 200 | 100000 | 10 | 500 | 24572 | 2456 | 108989 | 10 | 10 | 90 | 2468 | 2476 | 2655 | +| REGISTER | 200 | 100000 | 10 | 1000 | 61448 | 2069 | 299530 | 10 | 3 | 87 | 2515 | 2545 | 2742 | +| REGISTER | 200 | 200000 | 10 | 1000 | 168163 | 2485 | 830998 | 12 | 3 | 93 | 2814 | 2896 | 3286 | diff --git 
a/benchmarks/run.sh b/benchmarks/run.sh new file mode 100755 index 0000000..d33604e --- /dev/null +++ b/benchmarks/run.sh @@ -0,0 +1,51 @@ +# Register + +node index.js --nClients 5 --initialRegistrations 0 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 5 --initialRegistrations 1000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 10 --initialRegistrations 1000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 100 --initialRegistrations 1000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 100 --initialRegistrations 1000 --benchmarkRuns 1000 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 100 --initialRegistrations 10000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 100 --initialRegistrations 10000 --benchmarkRuns 1000 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 50 --initialRegistrations 100000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 50 --initialRegistrations 100000 --benchmarkRuns 1000 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 100 --initialRegistrations 100000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 100 --initialRegistrations 100000 --benchmarkRuns 1000 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 200 --initialRegistrations 100000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType REGISTER 
--outputFile './output-register.md' +node index.js --nClients 200 --initialRegistrations 100000 --benchmarkRuns 1000 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' +node index.js --nClients 200 --initialRegistrations 200000 --benchmarkRuns 1000 --nNamespaces 10 --benchmarkType REGISTER --outputFile './output-register.md' + +# Discover (limit 20) + +node index.js --nClients 5 --initialRegistrations 1000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType DISCOVER --outputFile './output-discover-limit.md' +node index.js --nClients 5 --initialRegistrations 1000 --benchmarkRuns 500 --nNamespaces 100 --benchmarkType DISCOVER --outputFile './output-discover-limit.md' +node index.js --nClients 10 --initialRegistrations 10000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType DISCOVER --outputFile './output-discover-limit.md' +node index.js --nClients 10 --initialRegistrations 10000 --benchmarkRuns 500 --nNamespaces 100 --benchmarkType DISCOVER --outputFile './output-discover-limit.md' +node index.js --nClients 10 --initialRegistrations 10000 --benchmarkRuns 1000 --nNamespaces 10 --benchmarkType DISCOVER --outputFile './output-discover-limit.md' +node index.js --nClients 10 --initialRegistrations 10000 --benchmarkRuns 1000 --nNamespaces 100 --benchmarkType DISCOVER --outputFile './output-discover-limit.md' +node index.js --nClients 100 --initialRegistrations 100000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType DISCOVER --outputFile './output-discover-limit.md' +node index.js --nClients 100 --initialRegistrations 100000 --benchmarkRuns 500 --nNamespaces 100 --benchmarkType DISCOVER --outputFile './output-discover-limit.md' + +# # Discover (limit 100) + +node index.js --nClients 5 --initialRegistrations 1000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType DISCOVER --discoverLimit 100 --outputFile './output-discover-limit-100.md' +node index.js --nClients 5 --initialRegistrations 1000 --benchmarkRuns 500 --nNamespaces 100 
--benchmarkType DISCOVER --discoverLimit 100 --outputFile './output-discover-limit-100.md' +node index.js --nClients 10 --initialRegistrations 10000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType DISCOVER --discoverLimit 100 --outputFile './output-discover-limit-100.md' +node index.js --nClients 10 --initialRegistrations 10000 --benchmarkRuns 500 --nNamespaces 100 --benchmarkType DISCOVER --discoverLimit 100 --outputFile './output-discover-limit-100.md' +node index.js --nClients 10 --initialRegistrations 10000 --benchmarkRuns 1000 --nNamespaces 10 --benchmarkType DISCOVER --discoverLimit 100 --outputFile './output-discover-limit-100.md' +node index.js --nClients 10 --initialRegistrations 10000 --benchmarkRuns 1000 --nNamespaces 100 --benchmarkType DISCOVER --discoverLimit 100 --outputFile './output-discover-limit-100.md' +node index.js --nClients 100 --initialRegistrations 100000 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType DISCOVER --discoverLimit 100 --outputFile './output-discover-limit-100.md' +node index.js --nClients 100 --initialRegistrations 100000 --benchmarkRuns 500 --nNamespaces 100 --benchmarkType DISCOVER --discoverLimit 100 --outputFile './output-discover-limit-100.md' + +# # Discover inexistent + +node index.js --nClients 5 --initialRegistrations 0 --benchmarkRuns 500 --nNamespaces 10 --benchmarkType DISCOVER --discoverInexistentNamespaces --outputFile './output-discover-inexistent.md' +node index.js --nClients 5 --initialRegistrations 0 --benchmarkRuns 1000 --nNamespaces 10 --benchmarkType DISCOVER --discoverInexistentNamespaces --outputFile './output-discover-inexistent.md' +node index.js --nClients 10 --initialRegistrations 0 --benchmarkRuns 1000 --nNamespaces 10 --benchmarkType DISCOVER --discoverInexistentNamespaces --outputFile './output-discover-inexistent.md' +node index.js --nClients 10 --initialRegistrations 0 --benchmarkRuns 1000 --nNamespaces 100 --benchmarkType DISCOVER --discoverInexistentNamespaces --outputFile 
'./output-discover-inexistent.md' +node index.js --nClients 100 --initialRegistrations 0 --benchmarkRuns 10000 --nNamespaces 10 --benchmarkType DISCOVER --discoverInexistentNamespaces --outputFile './output-discover-inexistent.md' +node index.js --nClients 100 --initialRegistrations 0 --benchmarkRuns 10000 --nNamespaces 100 --benchmarkType DISCOVER --discoverInexistentNamespaces --outputFile './output-discover-inexistent.md' +node index.js --nClients 10 --initialRegistrations 10000 --benchmarkRuns 10000 --nNamespaces 100 --benchmarkType DISCOVER --discoverInexistentNamespaces --outputFile './output-discover-inexistent.md' +node index.js --nClients 100 --initialRegistrations 10000 --benchmarkRuns 10000 --nNamespaces 100 --benchmarkType DISCOVER --discoverInexistentNamespaces --outputFile './output-discover-inexistent.md' +node index.js --nClients 10 --initialRegistrations 100000 --benchmarkRuns 10000 --nNamespaces 100 --benchmarkType DISCOVER --discoverInexistentNamespaces --outputFile './output-discover-inexistent.md' +node index.js --nClients 100 --initialRegistrations 100000 --benchmarkRuns 10000 --nNamespaces 100 --benchmarkType DISCOVER --discoverInexistentNamespaces --outputFile './output-discover-inexistent.md' \ No newline at end of file diff --git a/benchmarks/utils.js b/benchmarks/utils.js new file mode 100644 index 0000000..a21d721 --- /dev/null +++ b/benchmarks/utils.js @@ -0,0 +1,21 @@ +'use strict' + +function median (values) { + if (values.length === 0) { + return 0 + } + + values.sort((a, b) => a - b) + + var half = Math.floor(values.length / 2) + + if (values.length % 2) { + return values[half] + } + + return (values[half - 1] + values[half]) / 2.0 +} + +module.exports = { + median +} diff --git a/img/db-model.png b/img/db-model.png new file mode 100644 index 0000000..4f1fb87 Binary files /dev/null and b/img/db-model.png differ diff --git a/mysql-local/docker-compose.yml b/mysql-local/docker-compose.yml new file mode 100644 index 0000000..e98c262 
--- /dev/null +++ b/mysql-local/docker-compose.yml @@ -0,0 +1,37 @@ +version: '3.2' +services: + db: + image: mysql:8 + volumes: + - mysql-db:/var/lib/mysql + command: --default-authentication-plugin=mysql_native_password + restart: always + environment: + - MYSQL_ROOT_PASSWORD=my-secret-pw + - MYSQL_DATABASE=libp2p_rendezvous_db + ports: + - "3306:3306" + healthcheck: + test: ["CMD-SHELL", 'mysqladmin ping'] + interval: 10s + timeout: 2s + retries: 10 + server: + image: libp2p-rendezvous + volumes: + - ./id.json:/etc/opt/rendezvous/id.json + ports: + - "8000:8000" + - "8003:8003" + - "15003:15003" + restart: always + environment: + - DATASTORE_PASSWORD=my-secret-pw + - DATASTORE_DATABASE=libp2p_rendezvous_db + - DATASTORE_HOST=db + - PEER_ID=/etc/opt/rendezvous/id.json + depends_on: + db: + condition: service_healthy +volumes: + mysql-db: \ No newline at end of file diff --git a/mysql-local/docker.js b/mysql-local/docker.js new file mode 100644 index 0000000..34878ea --- /dev/null +++ b/mysql-local/docker.js @@ -0,0 +1,33 @@ +'use strict' + +const delay = require('delay') +const execa = require('execa') +const pWaitFor = require('p-wait-for') + +module.exports = { + start: async (port = 3306, pw = 'test-secret-pw', database = 'libp2p_rendezvous_db') => { + const procResult = execa.commandSync(`docker run -p 3306:${port} -e MYSQL_ROOT_PASSWORD=${pw} -e MYSQL_DATABASE=${database} -d mysql:8 --default-authentication-plugin=mysql_native_password`, { + all: true + }) + const containerId = procResult.stdout + + console.log(`wait for docker container ${containerId} to be ready`) + + await pWaitFor(() => { + const procCheck = execa.commandSync(`docker logs ${containerId}`) + const logs = procCheck.stdout + procCheck.stderr // Docker/MySQL sends to the stderr the ready for connections... 
+ + return logs.includes('ready for connections') + }, { + interval: 5000 + }) + // Some more time waiting to properly setup the container + await delay(12e3) + + return containerId + }, + stop: (containerId) => { + console.log('docker container is stopping') + execa.commandSync(`docker stop ${containerId}`) + } +} \ No newline at end of file diff --git a/package.json b/package.json index 8e3af76..3f2bdb6 100644 --- a/package.json +++ b/package.json @@ -1,11 +1,28 @@ { "name": "libp2p-rendezvous", "version": "0.0.0", - "description": "A javascript implementation of the rendezvous protocol for libp2p", - "leadMaintainer": "Vasco Santos ", - "main": "index.js", - "scripts": { - "test": "aegir test" + "description": "Javascript implementation of the rendezvous protocol server for libp2p", + "leadMaintainer": "Vasco Santos ", + "main": "src/index.js", + "types": "dist/src/index.d.ts", + "typesVersions": { + "*": { + "src/*": [ + "dist/src/*", + "dist/src/*/index" + ] + } + }, + "bin": { + "libp2p-rendezvous-server": "src/bin.js" + }, + "files": [ + "dist", + "src" + ], + "repository": { + "type": "git", + "url": "git+https://github.com/libp2p/js-libp2p-rendezvous.git" }, "keywords": [ "libp2p", @@ -13,28 +30,66 @@ "protocol", "discovery" ], - "author": "Maciej Krüger ", + "bugs": { + "url": "https://github.com/libp2p/js-libp2p-rendezvous/issues" + }, + "homepage": "https://github.com/libp2p/js-libp2p-rendezvous", "license": "MIT", - "dependencies": { - "chai": "^4.1.2", - "dirty-chai": "^2.0.1", - "protons": "^1.0.1", - "pull-protocol-buffers": "^0.1.2" + "engines": { + "node": ">=12.0.0", + "npm": ">=6.0.0" }, - "devDependencies": { - "aegir": "^13.1.0", - "libp2p": "^0.20.2", - "libp2p-mplex": "^0.7.0", - "libp2p-secio": "^0.10.0", - "libp2p-spdy": "^0.12.1", - "libp2p-tcp": "^0.12.0" + "browser": { + "mysql": false }, - "repository": { - "type": "git", - "url": "git+https://github.com/mkg20001/libp2p-rendezvous.git" + "scripts": { + "lint": "aegir lint", + "build": 
"aegir build", + "test": "aegir test", + "test:node": "aegir test -t node", + "test:browser": "aegir test -t browser", + "release": "aegir release", + "release-minor": "aegir release --type minor", + "release-major": "aegir release --type major", + "coverage": "nyc --reporter=text --reporter=lcov npm test" }, - "bugs": { - "url": "https://github.com/mkg20001/libp2p-rendezvous/issues" + "dependencies": { + "debug": "^4.2.0", + "err-code": "^2.0.3", + "es6-promisify": "^6.1.1", + "it-buffer": "^0.1.2", + "it-length-prefixed": "^3.1.0", + "it-pipe": "^1.1.0", + "libp2p": "^0.30.0", + "libp2p-mplex": "^0.10.0", + "libp2p-noise": "^2.0.1", + "libp2p-tcp": "^0.15.1", + "libp2p-websockets": "^0.14.0", + "menoetius": "0.0.2", + "minimist": "^1.2.5", + "multiaddr": "^8.0.0", + "mysql": "^2.18.1", + "p-retry": "^4.2.0", + "peer-id": "^0.14.1", + "protons": "^2.0.0", + "set-delayed-interval": "^1.0.0", + "uint8arrays": "^2.0.5" }, - "homepage": "https://github.com/mkg20001/libp2p-rendezvous#readme" + "devDependencies": { + "aegir": "^29.2.2", + "chai": "^4.2.0", + "chai-as-promised": "^7.1.1", + "delay": "^4.4.0", + "dirty-chai": "^2.0.1", + "execa": "^5.0.0", + "ipfs-utils": "^5.0.1", + "is-ci": "^2.0.0", + "microtime": "^3.0.0", + "p-defer": "^3.0.0", + "p-times": "^3.0.0", + "p-wait-for": "^3.1.0", + "pidusage": "^2.0.21", + "sinon": "^9.0.3", + "streaming-iterables": "^5.0.3" + } } diff --git a/src/bin.js b/src/bin.js new file mode 100644 index 0000000..bc713b3 --- /dev/null +++ b/src/bin.js @@ -0,0 +1,120 @@ +#!/usr/bin/env node + +'use strict' + +// Usage: $0 [--datastoreHost ] [--datastoreUser ] [datastorePassword ] [datastoreDatabase ] [--enableMemoryDatabase] +// [--peerId ] [--listenMultiaddrs ... ] [--announceMultiaddrs ... 
] [--metricsPort ] [--disableMetrics] +// [--maxPeerRegistrations ] + +/* eslint-disable no-console */ + +const debug = require('debug') +const log = debug('libp2p:rendezvous:bin') + +const fs = require('fs') +const http = require('http') +const menoetius = require('menoetius') +const argv = require('minimist')(process.argv.slice(2)) + +const TCP = require('libp2p-tcp') +const Websockets = require('libp2p-websockets') +const Muxer = require('libp2p-mplex') +const { NOISE: Crypto } = require('libp2p-noise') + +const PeerId = require('peer-id') + +const RendezvousServer = require('./index') +const Datastore = require('./datastores/mysql') +const DatastoreMemory = require('./datastores/memory') +const { getAnnounceAddresses, getListenAddresses } = require('./utils') + +async function main () { + // Datastore + const memoryDatabase = (argv.enableMemoryDatabase || argv.emd || process.env.DISABLE_METRICS) + const host = argv.datastoreHost || argv.dh || process.env.DATASTORE_HOST || 'localhost' + const user = argv.datastoreUser || argv.du || process.env.DATASTORE_USER || 'root' + const password = argv.datastorePassword || argv.dp || process.env.DATASTORE_PASSWORD || 'test-secret-pw' + const database = argv.datastoreDatabase || argv.dd || process.env.DATASTORE_DATABASE || 'libp2p_rendezvous_db' + + // Metrics + let metricsServer + const metrics = !(argv.disableMetrics || process.env.DISABLE_METRICS) + const metricsPort = argv.metricsPort || argv.mp || process.env.METRICS_PORT || '8003' + + // Multiaddrs + const listenAddresses = getListenAddresses(argv) + const announceAddresses = getAnnounceAddresses(argv) + + // PeerId + let peerId + if (argv.peerId || process.env.PEER_ID) { + const peerData = fs.readFileSync(argv.peerId || process.env.PEER_ID) + peerId = await PeerId.createFromJSON(JSON.parse(peerData.toString())) + } else { + peerId = await PeerId.create() + log('You are using an automatically generated peer.') + log('If you want to keep the same address for the server 
you should provide a peerId with --peerId ') + } + + // Rendezvous server configuration + const maxPeerRegistrations = argv.maxPeerRegistrations ? Number(argv.maxPeerRegistrations) : 1000 + + const datastore = memoryDatabase ? new DatastoreMemory() : new Datastore({ + host, + user, + password, + database + }) + + // Create Rendezvous server + const rendezvousServer = new RendezvousServer({ + modules: { + transport: [Websockets, TCP], + streamMuxer: [Muxer], + connEncryption: [Crypto] + }, + peerId, + addresses: { + listen: listenAddresses, + announce: announceAddresses + } + }, { datastore, maxPeerRegistrations }) + + console.log('Rendezvous server is starting') + await rendezvousServer.start() + + rendezvousServer.peerStore.on('change:multiaddrs', ({ peerId, multiaddrs }) => { + console.log('Rendezvous server listening on:') + if (peerId.equals(rendezvousServer.peerId)) { + multiaddrs.forEach((m) => console.log(`${m}/p2p/${peerId.toB58String()}`)) + } + }) + + if (metrics) { + log('enabling metrics') + metricsServer = http.createServer((req, res) => { + if (req.url !== '/metrics') { + res.statusCode = 200 + res.end() + } + }) + + menoetius.instrument(metricsServer) + + metricsServer.listen(metricsPort, '0.0.0.0', () => { + console.log(`metrics server listening on ${metricsPort}`) + }) + } + + const stop = async () => { + console.log('Stopping...') + await rendezvousServer.stop() + metricsServer && await metricsServer.close() + process.exit(0) + } + + process.on('SIGTERM', stop) + process.on('SIGINT', stop) +} + +main() diff --git a/src/constants.js b/src/constants.js new file mode 100644 index 0000000..cc70e50 --- /dev/null +++ b/src/constants.js @@ -0,0 +1,13 @@ +'use strict' + +exports.MAX_NS_LENGTH = 255 +exports.MAX_DISCOVER_LIMIT = 1000 +exports.MAX_PEER_REGISTRATIONS = 1000 +exports.MIN_TTL = 7.2e6 +exports.MAX_TTL = 2.592e+8 +exports.PROTOCOL_MULTICODEC = '/rendezvous/1.0.0' +exports.GC_BOOT_DELAY = 10e6 +exports.GC_INTERVAL = 7.2e6 +exports.GC_MIN_INTERVAL 
= 3e6 +exports.GC_MIN_REGISTRATIONS = 1000 +exports.GC_MAX_REGISTRATIONS = 10e6 diff --git a/src/datastores/README.md b/src/datastores/README.md new file mode 100644 index 0000000..6076536 --- /dev/null +++ b/src/datastores/README.md @@ -0,0 +1,13 @@ +# Rendezvous Datastores + +The `libp2p-rendezvous` server will store rendezvous records over time. This number might increase exponentially over time, even with a garbage collector for removing outdated records. Accordingly, this server should leverage a database to store records in an efficient fashion. + +A `MySQL` backed datastore is provided in this repository and is used by default by its server implementation. Other databases can easily be used by implementing a datastore fulfilling the [interface.ts](./interface.js). + +⚠️ For testing purposes you can skip using MySQL and use a memory datastore. **This must not be used in production!**. + +## MySQL Data Model + +The MySQL database data model created is illustrated in the following picture: + +![Data Model](../../../img/db-model.png) diff --git a/src/datastores/interface.ts b/src/datastores/interface.ts new file mode 100644 index 0000000..b7be9cc --- /dev/null +++ b/src/datastores/interface.ts @@ -0,0 +1,55 @@ +import PeerId from 'peer-id' + +export interface DatastoreFactory { + new (options?: DatastoreOptions): Datastore; +} + +export interface Datastore { + /** + * Setup datastore. + */ + start (): Promise; + /** + * Tear down datastore. + */ + stop (): void; + /** + * Run datastore garbage collector to remove expired records. + */ + gc (): Promise; + /** + * Add a rendezvous registrations. + */ + addRegistration (namespace: string, peerId: PeerId, signedPeerRecord: Uint8Array, ttl: number): Promise; + /** + * Get rendezvous registrations for a given namespace. + */ + getRegistrations (namespace: string, query?: RegistrationQuery): Promise<{ registrations: Registration[], cookie?: string }>; + /** + * Get number of registrations of a given peer. 
+ */ + getNumberOfRegistrationsFromPeer (peerId: PeerId): Promise; + /** + * Remove registration of a given namespace to a peer. + */ + removeRegistration (ns: string, peerId: PeerId): Promise; + /** + * Remove all registrations of a given peer. + */ + removePeerRegistrations (peerId: PeerId): Promise; + /** + * Reset content + */ + reset (): Promise; +} + +export type RegistrationQuery = { + limit?: number; + cookie?: string; +} + +export type Registration = { + ns: string; + signedPeerRecord: Uint8Array; + ttl: number; +} diff --git a/src/datastores/memory.js b/src/datastores/memory.js new file mode 100644 index 0000000..4c4fcfa --- /dev/null +++ b/src/datastores/memory.js @@ -0,0 +1,251 @@ +'use strict' + +const debug = require('debug') +const log = debug('libp2p:rendezvous-server:memory') +log.error = debug('libp2p:rendezvous-server:memory:error') + +const errCode = require('err-code') +const { codes: errCodes } = require('../errors') + +const PeerId = require('peer-id') + +/** + * @typedef {import('peer-id')} PeerId + * @typedef {import('./interface').Datastore} Datastore + * @typedef {import('./interface').Registration} Registration + */ + +/** * + * + * @typedef {Object} NamespaceRegistration + * @property {string} id random generated id to map cookies + * @property {Uint8Array} signedPeerRecord + * @property {number} expiration + */ + +/** + * @implements {Datastore} + */ +class Memory { + /** + * Memory datastore for libp2p rendezvous. + */ + constructor () { + /** + * Registrations per namespace, where a registration maps peer id strings to a namespace reg. + * + * @type {Map>} + */ + this.nsRegistrations = new Map() + + /** + * Registration ids per cookie. 
+ * + * @type {Map>} + */ + this.cookieRegistrations = new Map() + } + + /** + * @returns {Promise} + */ + start () { + return Promise.resolve() + } + + stop () {} + + reset () { + this.nsRegistrations.clear() + this.cookieRegistrations.clear() + return Promise.resolve() + } + + /** + * Run datastore garbage collector to remove expired records. + * + * @returns {Promise} + */ + gc () { + const now = Date.now() + const removedIds = [] + + // Iterate namespaces + this.nsRegistrations.forEach((nsEntry) => { + // Iterate registrations for namespaces + nsEntry.forEach((nsReg, idStr) => { + if (now >= nsReg.expiration) { + nsEntry.delete(idStr) + removedIds.push(nsReg.id) + + log(`gc removed namespace entry for ${idStr}`) + } + }) + }) + + // Remove outdated records references from cookies + for (const [key, idSet] of this.cookieRegistrations.entries()) { + const filteredIds = Array.from(idSet).filter((id) => !removedIds.includes(id)) + + if (filteredIds && filteredIds.length) { + this.cookieRegistrations.set(key, new Set(filteredIds)) + } else { + // Empty + this.cookieRegistrations.delete(key) + } + } + + return Promise.resolve(removedIds.length) + } + + /** + * Add an entry to the registration table. 
+ * + * @param {string} ns + * @param {PeerId} peerId + * @param {Uint8Array} signedPeerRecord + * @param {number} ttl + * @returns {Promise} + */ + addRegistration (ns, peerId, signedPeerRecord, ttl) { + const nsReg = this.nsRegistrations.get(ns) || new Map() + + nsReg.set(peerId.toB58String(), { + id: String(Math.random() + Date.now()), + expiration: Date.now() + ttl, + signedPeerRecord + }) + + this.nsRegistrations.set(ns, nsReg) + + return Promise.resolve() + } + + /** + * Get registrations for a given namespace + * + * @param {string} ns + * @param {object} [options] + * @param {number} [options.limit = 10] + * @param {string} [options.cookie] + * @returns {Promise<{ registrations: Array, cookie: string }>} + */ + getRegistrations (ns, { limit = 10, cookie } = {}) { + const nsEntry = this.nsRegistrations.get(ns) || new Map() + const registrations = [] + + // Get the cookie registration if provided, create a cookie otherwise + let cRegistrations + if (cookie) { + cRegistrations = this.cookieRegistrations.get(cookie) + } else { + cRegistrations = new Set() + cookie = String(Math.random() + Date.now()) + } + + if (!cRegistrations) { + throw errCode(new Error('no registrations for the given cookie'), errCodes.INVALID_COOKIE) + } + + for (const [idStr, nsReg] of nsEntry.entries()) { + if (nsReg.expiration <= Date.now()) { + // Clean outdated registration from registrations and cookie record + nsEntry.delete(idStr) + cRegistrations.delete(nsReg.id) + continue + } + + // If this record was already sent, continue + if (cRegistrations.has(nsReg.id)) { + continue + } + + cRegistrations.add(nsReg.id) + registrations.push({ + ns, + signedPeerRecord: nsReg.signedPeerRecord, + ttl: nsReg.expiration - Date.now() // TODO: do not add if invalid? 
+ }) + + // Stop if reached limit + if (registrations.length === limit) { + break + } + } + + // Save cookie registrations + this.cookieRegistrations.set(cookie, cRegistrations) + + return Promise.resolve({ + registrations, + cookie + }) + } + + /** + * Get number of registrations of a given peer. + * + * @param {PeerId} peerId + * @returns {Promise} + */ + getNumberOfRegistrationsFromPeer (peerId) { + const namespaces = [] + + this.nsRegistrations.forEach((nsEntry, namespace) => { + if (nsEntry.has(peerId.toB58String())) { + namespaces.push(namespace) + } + }) + + return Promise.resolve(namespaces.length) + } + + /** + * Remove registration of a given namespace to a peer + * + * @param {string} ns + * @param {PeerId} peerId + * @returns {Promise} + */ + removeRegistration (ns, peerId) { + let count = 0 + const nsReg = this.nsRegistrations.get(ns) + + if (nsReg && nsReg.delete(peerId.toB58String())) { + count += 1 + + // Remove registrations map to namespace if empty + if (!nsReg.size) { + this.nsRegistrations.delete(ns) + } + log('removed existing registrations for the namespace - peer pair:', ns, peerId.toB58String()) + } + + return Promise.resolve(count) + } + + /** + * Remove all registrations of a given peer + * + * @param {PeerId} peerId + * @returns {Promise} + */ + removePeerRegistrations (peerId) { + let count = 0 + for (const [ns, nsReg] of this.nsRegistrations.entries()) { + if (nsReg.delete(peerId.toB58String())) { + count += 1 + + // Remove registrations map to namespace if empty + if (!nsReg.size) { + this.nsRegistrations.delete(ns) + } + } + } + + log('removed existing registrations for peer', peerId.toB58String()) + return Promise.resolve(count) + } +} + +module.exports = Memory diff --git a/src/datastores/mysql.js b/src/datastores/mysql.js new file mode 100644 index 0000000..8421013 --- /dev/null +++ b/src/datastores/mysql.js @@ -0,0 +1,350 @@ +'use strict' + +const debug = require('debug') +const log = debug('libp2p:rendezvous-server:mysql') 
+log.error = debug('libp2p:rendezvous-server:mysql:error') + +const errCode = require('err-code') +const { codes: errCodes } = require('../errors') + +const mysql = require('mysql') +const pRetry = require('p-retry') + +/** + * @typedef {import('peer-id')} PeerId + * @typedef {import('./interface').Datastore} Datastore + * @typedef {import('./interface').Registration} Registration + */ + +/** + * @typedef {object} MySqlOptions + * @param {string} host + * @param {string} user + * @param {string} password + * @param {string} database + * @param {number} [connectionLimit = 20] + * @param {boolean} [insecureAuth = true] + * @param {boolean} [multipleStatements = true] + */ + +/** + * @implements {Datastore} + */ +class Mysql { + /** + * Database manager for libp2p rendezvous. + * + * @param {MySqlOptions} options + */ + constructor ({ host, user, password, database, connectionLimit = 20, insecureAuth = true, multipleStatements = true }) { + this.options = { + host, + user, + password, + database, + connectionLimit, + insecureAuth, + multipleStatements + } + + /** + * Peer string identifier with current add operations. + * + * @type {Map>} + */ + this._registeringPeer = new Map() + } + + /** + * Starts DB connection and creates needed tables if needed + * + * @returns {Promise} + */ + async start () { + // Retry starting the Database in case it is still booting + await pRetry(() => this._initDB()) + } + + /** + * Closes Database connection + */ + stop () { + this.pool.end() + } + + async reset () { + await new Promise((resolve, reject) => { + this.pool.query(` + DROP TABLE IF EXISTS cookie; + DROP TABLE IF EXISTS registration; + `, (err) => { + if (err) { + return reject(err) + } + resolve() + }) + }) + } + + /** + * Run datastore garbage collector to remove expired records. 
+ * + * @returns {Promise} + */ + gc () { + return new Promise((resolve, reject) => { + this.pool.query('DELETE FROM registration WHERE expiration <= UNIX_TIMESTAMP(NOW())', + (err, res) => { + if (err) { + return reject(err) + } + resolve(res.affectedRows) + }) + }) + } + + /** + * Add an entry to the registration table. + * + * @param {string} namespace + * @param {PeerId} peerId + * @param {Uint8Array} signedPeerRecord + * @param {number} ttl + * @returns {Promise} + */ + addRegistration (namespace, peerId, signedPeerRecord, ttl) { + const id = peerId.toB58String() + const opId = String(Math.random() + Date.now()) + const peerOps = this._registeringPeer.get(id) || new Set() + + peerOps.add(opId) + this._registeringPeer.set(id, peerOps) + + return new Promise((resolve, reject) => { + this.pool.query('INSERT INTO ?? SET ?', + ['registration', { + namespace, + peer_id: id, + signed_peer_record: Buffer.from(signedPeerRecord), + expiration: (Date.now() + ttl) / 1000 // Epoch in seconds like MySQL + }], (err) => { + // Remove Operation + peerOps.delete(opId) + if (!peerOps.size) { + this._registeringPeer.delete(id) + } + + if (err) { + return reject(err) + } + resolve() + } + ) + }) + } + + /** + * Get registrations for a given namespace + * + * @param {string} namespace + * @param {object} [options] + * @param {number} [options.limit = 10] + * @param {string} [options.cookie] + * @returns {Promise<{ registrations: Array, cookie?: string }>} + */ + async getRegistrations (namespace, { limit = 10, cookie } = {}) { + if (cookie) { + const cookieEntries = await new Promise((resolve, reject) => { + this.pool.query( + 'SELECT * FROM cookie WHERE id = ? 
LIMIT 1', + [cookie], + (err, results) => { + if (err) { + return reject(err) + } + resolve(results) + } + ) + }) + if (!cookieEntries.length) { + throw errCode(new Error('no registrations for the given cookie'), errCodes.INVALID_COOKIE) + } + } + + const cookieWhereNotExists = () => { + if (!cookie) return '' + return ` AND NOT EXISTS ( + SELECT null + FROM cookie c + WHERE r.id = c.reg_id AND c.namespace = r.namespace AND c.id = ? + )` + } + + const results = await new Promise((resolve, reject) => { + this.pool.query( + `SELECT id, namespace, peer_id, signed_peer_record, expiration FROM registration r + WHERE namespace = ? AND expiration >= UNIX_TIMESTAMP(NOW()) ${cookieWhereNotExists()} + ORDER BY expiration DESC + LIMIT ?`, + [namespace, cookie || limit, limit], + (err, results) => { + if (err) { + return reject(err) + } + resolve(results) + } + ) + }) + + if (!results.length) { + return { + registrations: [], + cookie + } + } + + cookie = cookie || String(Math.random() + Date.now()) + + // Store in cookies if results available + await new Promise((resolve, reject) => { + this.pool.query( + `INSERT INTO ?? (id, namespace, reg_id) VALUES ${results.map((entry) => + `(${this.pool.escape(cookie)}, ${this.pool.escape(entry.namespace)}, ${this.pool.escape(entry.id)})` + )}`, ['cookie'] + , (err) => { + if (err) { + return reject(err) + } + // @ts-ignore + resolve() + }) + }) + + return { + registrations: results.map((r) => ({ + id: r.id, + ns: r.namespace, + signedPeerRecord: new Uint8Array(r.signed_peer_record), + ttl: r.expiration + })), + cookie + } + } + + /** + * Get number of registrations of a given peer. 
+ * + * @param {PeerId} peerId + * @returns {Promise} + */ + getNumberOfRegistrationsFromPeer (peerId) { + const id = peerId.toB58String() + + return new Promise((resolve, reject) => { + this.pool.query('SELECT COUNT(1) FROM registration WHERE peer_id = ?', + [id], + (err, res) => { + if (err) { + return reject(err) + } + // DoS attack defense check + const pendingReg = this._getNumberOfPendingRegistrationsFromPeer(peerId) + resolve(res[0]['COUNT(1)'] + pendingReg) + } + ) + }) + } + + /** + * Get number of ongoing registrations for a peer. + * + * @param {PeerId} peerId + * @returns {number} + */ + _getNumberOfPendingRegistrationsFromPeer (peerId) { + const peerOps = this._registeringPeer.get(peerId.toB58String()) || new Set() + + return peerOps.size + } + + /** + * Remove registration of a given namespace to a peer + * + * @param {string} ns + * @param {PeerId} peerId + * @returns {Promise} + */ + removeRegistration (ns, peerId) { + const id = peerId.toB58String() + + return new Promise((resolve, reject) => { + this.pool.query('DELETE FROM registration WHERE peer_id = ? AND namespace = ?', [id, ns], + (err, res) => { + if (err) { + return reject(err) + } + resolve(res.affectedRows) + }) + }) + } + + /** + * Remove all registrations of a given peer + * + * @param {PeerId} peerId + * @returns {Promise} + */ + removePeerRegistrations (peerId) { + const id = peerId.toB58String() + + return new Promise((resolve, reject) => { + this.pool.query('DELETE FROM registration WHERE peer_id = ?', [id], + (err, res) => { + if (err) { + return reject(err) + } + resolve(res.affectedRows) + }) + }) + } + + /** + * Initialize Database if tables do not exist. 
+ * + * @returns {Promise} + */ + _initDB () { + this.pool = mysql.createPool(this.options) + + return new Promise((resolve, reject) => { + this.pool.query(` + CREATE TABLE IF NOT EXISTS registration ( + id INT UNSIGNED NOT NULL AUTO_INCREMENT, + namespace varchar(255) NOT NULL, + peer_id varchar(255) NOT NULL, + signed_peer_record blob NOT NULL, + expiration BIGINT NOT NULL, + PRIMARY KEY (id), + INDEX (namespace, expiration, peer_id) + ); + + CREATE TABLE IF NOT EXISTS cookie ( + id varchar(21), + namespace varchar(255), + reg_id INT UNSIGNED, + PRIMARY KEY (id, namespace, reg_id), + FOREIGN KEY (reg_id) REFERENCES registration(id) ON DELETE CASCADE + ); + `, (err) => { + if (err) { + log.error(err) + return reject(err) + } + log('db is initialized') + resolve() + }) + }) + } +} + +module.exports = Mysql diff --git a/src/errors.js b/src/errors.js new file mode 100644 index 0000000..efb6560 --- /dev/null +++ b/src/errors.js @@ -0,0 +1,5 @@ +'use strict' + +exports.codes = { + INVALID_COOKIE: 'ERR_INVALID_COOKIE' +} diff --git a/src/index.js b/src/index.js index fcbf075..5d45e04 100644 --- a/src/index.js +++ b/src/index.js @@ -1,78 +1,229 @@ 'use strict' -const RPC = require('./rpc') -const noop = () => {} +const debug = require('debug') +const log = Object.assign(debug('libp2p:rendezvous-server'), { + error: debug('libp2p:rendezvous-server:err') +}) +const { + setDelayedInterval, + clearDelayedInterval +} = require('set-delayed-interval') -class RendezvousDiscovery { - constructor (swarm) { - this.swarm = swarm - this.peers = [] +const Libp2p = require('libp2p') +const PeerId = require('peer-id') + +const rpc = require('./rpc') +const { + MIN_TTL, + MAX_TTL, + MAX_NS_LENGTH, + MAX_DISCOVER_LIMIT, + MAX_PEER_REGISTRATIONS, + GC_BOOT_DELAY, + GC_INTERVAL, + GC_MIN_INTERVAL, + GC_MIN_REGISTRATIONS, + GC_MAX_REGISTRATIONS, + PROTOCOL_MULTICODEC +} = require('./constants') +const { fallbackNullish } = require('./utils') + +/** + * @typedef 
{import('./datastores/interface').Datastore} Datastore + * @typedef {import('./datastores/interface').Registration} Registration + */ + +/** + * @typedef {Object} RendezvousServerOptions + * @property {Datastore} datastore + * @property {number} [minTtl = MIN_TTL] minimum acceptable ttl to store a registration + * @property {number} [maxTtl = MAX_TTL] maximum acceptable ttl to store a registration + * @property {number} [maxNsLength = MAX_NS_LENGTH] maximum acceptable namespace length + * @property {number} [maxDiscoveryLimit = MAX_DISCOVER_LIMIT] maximum acceptable discover limit + * @property {number} [maxPeerRegistrations = MAX_PEER_REGISTRATIONS] maximum acceptable registrations per peer + * @property {number} [gcBootDelay = GC_BOOT_DELAY] delay before starting garbage collector job + * @property {number} [gcMinInterval = GC_MIN_INTERVAL] minimum interval between each garbage collector job, in case maximum threshold reached + * @property {number} [gcInterval = GC_INTERVAL] interval between each garbage collector job + * @property {number} [gcMinRegistrations = GC_MIN_REGISTRATIONS] minimum number of registration for triggering garbage collector + * @property {number} [gcMaxRegistrations = GC_MAX_REGISTRATIONS] maximum number of registration for triggering garbage collector + */ + +/** + * Libp2p rendezvous server. 
+ */ +class RendezvousServer extends Libp2p { + /** + * @class + * @param {import('libp2p').Libp2pOptions} libp2pOptions + * @param {RendezvousServerOptions} options + */ + constructor (libp2pOptions, options) { + super(libp2pOptions) + + this._minTtl = fallbackNullish(options.minTtl, MIN_TTL) + this._maxTtl = fallbackNullish(options.maxTtl, MAX_TTL) + this._maxNsLength = fallbackNullish(options.maxNsLength, MAX_NS_LENGTH) + this._maxDiscoveryLimit = fallbackNullish(options.maxDiscoveryLimit, MAX_DISCOVER_LIMIT) + this._maxPeerRegistrations = fallbackNullish(options.maxPeerRegistrations, MAX_PEER_REGISTRATIONS) + + this.rendezvousDatastore = options.datastore + + this._registrationsCount = 0 + this._lastGcTs = 0 + this._gcDelay = fallbackNullish(options.gcBootDelay, GC_BOOT_DELAY) + this._gcInterval = fallbackNullish(options.gcInterval, GC_INTERVAL) + this._gcMinInterval = fallbackNullish(options.gcMinInterval, GC_MIN_INTERVAL) + this._gcMinRegistrations = fallbackNullish(options.gcMinRegistrations, GC_MIN_REGISTRATIONS) + this._gcMaxRegistrations = fallbackNullish(options.gcMaxRegistrations, GC_MAX_REGISTRATIONS) + this._gcJob = this._gcJob.bind(this) } - _dial (pi, cb) { - if (!cb) cb = noop - this.swarm.dialProtocol(pi, '/rendezvous/1.0.0', (err, conn) => { - if (err) return cb(err) - const rpc = new RPC() - rpc.setup(conn, err => { - if (err) return cb(err) - this.peers.push(rpc) - cb() - }) + /** + * Start rendezvous server for handling rendezvous streams and gc. 
+ * + * @returns {Promise} + */ + async start () { + super.start() + + if (this._timeout) { + return + } + + log('starting') + + await this.rendezvousDatastore.start() + + // Garbage collection + this._timeout = setDelayedInterval( + this._gcJob, this._gcInterval, this._gcDelay + ) + + // Incoming streams handling + this.handle(PROTOCOL_MULTICODEC, rpc(this)) + + // Remove peer records from memory as they are not needed + // TODO: This should be handled by PeerStore itself in the future + this.peerStore.on('peer', (peerId) => { + this.peerStore.delete(peerId) }) - } - _rpc (cmd, ...a) { // TODO: add. round-robin / multicast / anycast? - this.peers[0][cmd](...a) + log('started') } - register (ns, peer, cb) { - this._rpc('register', ns, peer, 0, cb) // TODO: interface does not expose ttl option?! + /** + * Stops rendezvous server gc and clears registrations + * + * @returns {Promise} + */ + stop () { + this.unhandle(PROTOCOL_MULTICODEC) + clearDelayedInterval(this._timeout) + + this.rendezvousDatastore.stop() + + super.stop() + log('stopped') + + return Promise.resolve() } - discover (ns, limit, cookie, cb) { - if (typeof cookie === 'function') { - cb = cookie - cookie = Buffer.from('') - } - if (typeof limit === 'function') { - cookie = Buffer.from('') - cb = limit - limit = 0 - } - if (typeof ns === 'function') { - cookie = Buffer.from('') - limit = 0 - cb = ns - ns = null + /** + * Call garbage collector if enough registrations. + * + * @returns {Promise} + */ + async _gcJob () { + if (this._registrationsCount > this._gcMinRegistrations && Date.now() > this._gcMinInterval + this._lastGcTs) { + await this._gc() } + } + + /** + * Run datastore garbage collector. 
+ * + * @returns {Promise} + */ + async _gc () { + log('gc starting') - this._rpc('discover', ns, limit, cookie, cb) + const count = await this.rendezvousDatastore.gc() + this._registrationsCount -= count + this._lastGcTs = Date.now() + + log('gc finished') } - unregister (ns, id) { - if (!ns) { - id = this.swarm.peerInfo.id.toBytes() - ns = null - } - if (!id) { - id = this.swarm.peerInfo.id.toBytes() + /** + * Add a peer registration to a namespace. + * + * @param {string} ns + * @param {PeerId} peerId + * @param {Uint8Array} signedPeerRecord + * @param {number} ttl + * @returns {Promise} + */ + async addRegistration (ns, peerId, signedPeerRecord, ttl) { + await this.rendezvousDatastore.addRegistration(ns, peerId, signedPeerRecord, ttl) + log(`added registration for the namespace ${ns} with peer ${peerId.toB58String()}`) + + this._registrationsCount += 1 + // Manually trigger garbage collector if max registrations threshold reached + // and the minGc interval is finished + if (this._registrationsCount >= this._gcMaxRegistrations && Date.now() > this._gcMinInterval + this._lastGcTs) { + this._gc() } + } - this._rpc('unregister', ns, id) + /** + * Remove registration of a given namespace to a peer + * + * @param {string} ns + * @param {PeerId} peerId + * @returns {Promise} + */ + async removeRegistration (ns, peerId) { + const count = await this.rendezvousDatastore.removeRegistration(ns, peerId) + log(`removed existing registrations for the namespace ${ns} - peer ${peerId.toB58String()} pair`) + + this._registrationsCount -= count } - start (cb) { - this.swarm.on('peer:connect', peer => { - this._dial(peer) - }) - cb() + /** + * Remove all registrations of a given peer + * + * @param {PeerId} peerId + * @returns {Promise} + */ + async removePeerRegistrations (peerId) { + const count = await this.rendezvousDatastore.removePeerRegistrations(peerId) + log(`removed existing registrations for peer ${peerId.toB58String()}`) + + this._registrationsCount -= count + } + + 
/** + * Get registrations for a namespace + * + * @param {string} ns + * @param {object} [options] + * @param {number} [options.limit] + * @param {string} [options.cookie] + * @returns {Promise<{ registrations: Array, cookie?: string }>} + */ + async getRegistrations (ns, { limit = MAX_DISCOVER_LIMIT, cookie } = {}) { + return await this.rendezvousDatastore.getRegistrations(ns, { limit, cookie }) } - stop (cb) { - // TODO: shutdown all conns - cb() + /** + * Get number of registrations of a given peer. + * + * @param {PeerId} peerId + * @returns {Promise} + */ + async getNumberOfRegistrationsFromPeer (peerId) { + return await this.rendezvousDatastore.getNumberOfRegistrationsFromPeer(peerId) } } -module.exports = RendezvousDiscovery +module.exports = RendezvousServer diff --git a/src/proto.js b/src/proto.js index b2c6d94..dce8fff 100644 --- a/src/proto.js +++ b/src/proto.js @@ -3,6 +3,7 @@ const protons = require('protons') module.exports = protons(` +message Message { enum MessageType { REGISTER = 0; REGISTER_RESPONSE = 1; @@ -19,22 +20,22 @@ module.exports = protons(` E_INVALID_COOKIE = 103; E_NOT_AUTHORIZED = 200; E_INTERNAL_ERROR = 300; - } - - message PeerInfo { - optional bytes id = 1; - repeated bytes addrs = 2; + E_UNAVAILABLE = 400; } message Register { optional string ns = 1; - optional PeerInfo peer = 2; + // signedPeerRecord contains a serialized SignedEnvelope containing a PeerRecord, + // signed by the sending node. It contains the same addresses as the listenAddrs field, but + // in a form that lets us share authenticated addrs with other peers. 
+ optional bytes signedPeerRecord = 2; optional int64 ttl = 3; // in seconds } message RegisterResponse { optional ResponseStatus status = 1; optional string statusText = 2; + optional int64 ttl = 3; // in seconds } message Unregister { @@ -55,7 +56,6 @@ module.exports = protons(` optional string statusText = 4; } -message Message { optional MessageType type = 1; optional Register register = 2; optional RegisterResponse registerResponse = 3; diff --git a/src/rpc.js b/src/rpc.js deleted file mode 100644 index 5d0781b..0000000 --- a/src/rpc.js +++ /dev/null @@ -1,155 +0,0 @@ -'use strict' - -const pull = require('pull-stream') -const ppb = require('pull-protocol-buffers') -const {Message, MessageType} = require('./proto') -const Pushable = require('pull-pushable') -const debug = require('debug') -const log = debug('libp2p-rendezvous:rpc') -const Peer = require('peer-info') -const Id = require('peer-id') -const once = require('once') - -const TIMEOUT = 1000 * 10 // TODO: spec this - -function wrap (f, t) { - let cb = once((...a) => { - clearTimeout(timeout) - f(...a) - }) - let timeout - timeout = setTimeout(() => cb(new Error('Timeout!')), t) - return cb -} - -class RPC { - constructor () { - this.source = Pushable() - this.cbs = { - discover: [], - register: [] - } - } - sink (read) { - const next = (end, msg, doend) => { - if (doend) { - log('crash@%s: %s', this.id, doend) - return read(doend, next) - } - if (end) { - this.online = false - log('end@%s: %s', this.id, end) - this.source.end() - return - } - let f - let pi - switch (msg.type) { - case MessageType.REGISTER_RESPONSE: - f = this.cbs.register.shift() - if (typeof f !== 'function') { - log('register@%s: response ignored, no cb found!', this.id) - return read(null, next) - } else { - let e - if (msg.registerResponse.status) { - e = new Error('Server returned error: ' + (msg.registerResponse.statusText || '(unknown code)')) - } - f(e) - } - break - case MessageType.DISCOVER_RESPONSE: - try { - f = 
this.cbs.discover.shift() - if (typeof f !== 'function') { - log('discover@%s: response ignored, no cb found!', this.id) - return read(null, next) - } else { - if (msg.discoverResponse.status) { - return setImmediate(() => f(new Error('Server returned error: ' + (msg.discoverResponse.statusText || '(unknown code)')))) - } - pi = msg.discoverResponse.registrations.map(p => { - try { - // TODO: use other values like ttl/ns in peer-info? - const pi = new Peer(new Id(p.peer.id)) - p.peer.addrs.forEach(a => pi.multiaddrs.add(a)) - return pi - } catch (e) { - log('discover@%s: invalid pi returned: %s', this.id, e) - } - }).filter(Boolean) - setImmediate(() => f(null, { - cookie: msg.discoverResponse.cookie, - peers: pi - })) - } - } catch (e) { - f(e) - return next(null, null, e) - } - break - default: // should that disconnect or just get ignored? - log('error@%s: sent wrong msg type %s', this.id, msg.type) - return next(null, null, true) - } - read(null, next) - } - read(null, next) - } - setup (conn, cb) { - conn.getPeerInfo((err, pi) => { - if (err) return cb(err) - this.pi = pi - this.id = pi.id.toB58String() - pull( - conn, - ppb.decode(Message), - this, - ppb.encode(Message), - conn - ) - - this.online = true - cb() - }) - } - - register (ns, peer, ttl, cb) { - this.source.push({ - type: MessageType.REGISTER, - register: { - ns, - peer: { - id: peer.id.toBytes(), - addrs: peer.multiaddrs.toArray().map(a => a.buffer) - }, - ttl - } - }) - this.cbs.register.push(wrap(cb, TIMEOUT)) - } - - discover (ns, limit, cookie, cb) { - this.source.push({ - type: MessageType.DISCOVER, - discover: { - ns, - limit, - cookie - } - }) - this.cbs.discover.push(wrap(cb, TIMEOUT)) - } - - unregister (ns, id) { - this.source.push({ - type: MessageType.UNREGISTER, - unregister: { - ns, - id - } - }) - } -} - -module.exports = RPC diff --git a/src/rpc/handlers/discover.js b/src/rpc/handlers/discover.js new file mode 100644 index 0000000..d64d628 --- /dev/null +++ 
b/src/rpc/handlers/discover.js @@ -0,0 +1,97 @@ + +'use strict' + +const debug = require('debug') +const log = Object.assign(debug('libp2p:rendezvous-server:rpc:discover'), { + error: debug('libp2p:rendezvous-server:rpc:discover:err') +}) + +const fromString = require('uint8arrays/from-string') +const toString = require('uint8arrays/to-string') + +const { Message } = require('../../proto') +const MESSAGE_TYPE = Message.MessageType +const RESPONSE_STATUS = Message.ResponseStatus + +const { codes: errCodes } = require('../../errors') + +/** + * @typedef {import('peer-id')} PeerId + * @typedef {import('../..')} RendezvousPoint + */ + +/** + * @param {RendezvousPoint} rendezvousPoint + */ +module.exports = (rendezvousPoint) => { + /** + * Process `Discover` Rendezvous messages. + * + * @param {PeerId} peerId + * @param {Message} msg + * @returns {Promise} + */ + return async function discover (peerId, msg) { + try { + const namespace = msg.discover.ns + log(`discover ${peerId.toB58String()}: discover on ${namespace}`) + + // Validate namespace + if (!namespace || namespace.length > rendezvousPoint._maxNsLength) { + log.error(`invalid namespace received: ${namespace}`) + + return { + type: MESSAGE_TYPE.DISCOVER_RESPONSE, + discoverResponse: { + status: RESPONSE_STATUS.E_INVALID_NAMESPACE, + statusText: `invalid namespace received: "${namespace}". It should be smaller than ${rendezvousPoint._maxNsLength}` + } + } + } + + if (!msg.discover.limit || msg.discover.limit <= 0 || msg.discover.limit > rendezvousPoint._maxDiscoveryLimit) { + msg.discover.limit = rendezvousPoint._maxDiscoveryLimit + } + + // Get registrations + const options = { + cookie: msg.discover.cookie ? 
toString(msg.discover.cookie) : undefined, + limit: msg.discover.limit + } + + const { registrations, cookie } = await rendezvousPoint.getRegistrations(namespace, options) + + return { + type: MESSAGE_TYPE.DISCOVER_RESPONSE, + discoverResponse: { + cookie: cookie && fromString(cookie), + registrations: registrations.map((r) => ({ + ns: r.ns, + signedPeerRecord: r.signedPeerRecord, + ttl: r.ttl * 1e-3 // convert to seconds + })), + status: RESPONSE_STATUS.OK + } + } + } catch (err) { + log.error(err) + + if (err.code === errCodes.INVALID_COOKIE) { + return { + type: MESSAGE_TYPE.DISCOVER_RESPONSE, + discoverResponse: { + status: RESPONSE_STATUS.E_INVALID_COOKIE, + statusText: `invalid cookie received: "${toString(msg.discover.cookie)}"` + } + } + } + } + + return { + type: MESSAGE_TYPE.REGISTER_RESPONSE, + discoverResponse: { + status: RESPONSE_STATUS.E_INTERNAL_ERROR + } + } + } +} diff --git a/src/rpc/handlers/index.js b/src/rpc/handlers/index.js new file mode 100644 index 0000000..f8d37f3 --- /dev/null +++ b/src/rpc/handlers/index.js @@ -0,0 +1,22 @@ +'use strict' + +const { Message } = require('../../proto') +const MESSAGE_TYPE = Message.MessageType + +module.exports = (server) => { + const handlers = { + [MESSAGE_TYPE.REGISTER]: require('./register')(server), + [MESSAGE_TYPE.UNREGISTER]: require('./unregister')(server), + [MESSAGE_TYPE.DISCOVER]: require('./discover')(server) + } + + /** + * Get the message handler matching the passed in type. 
'use strict'

const debug = require('debug')
const log = Object.assign(debug('libp2p:rendezvous-server:rpc:register'), {
  error: debug('libp2p:rendezvous-server:rpc:register:err')
})

const Envelope = require('libp2p/src/record/envelope')
const PeerRecord = require('libp2p/src/record/peer-record')

const { Message } = require('../../proto')
const MESSAGE_TYPE = Message.MessageType
const RESPONSE_STATUS = Message.ResponseStatus

/**
 * @typedef {import('peer-id')} PeerId
 * @typedef {import('../..')} RendezvousPoint
 */

/**
 * Factory for the `REGISTER` message handler.
 *
 * @param {RendezvousPoint} rendezvousPoint
 */
module.exports = (rendezvousPoint) => {
  /**
   * Process `Register` Rendezvous messages.
   *
   * Validation happens in order: namespace, ttl, per-peer registration
   * quota, then the signed peer record's authenticity. Any failure maps to
   * a specific response status; unexpected errors (including an envelope
   * that fails to open/certify) fall through to E_INTERNAL_ERROR. The
   * returned promise always resolves to a REGISTER_RESPONSE message.
   *
   * @param {PeerId} peerId - peer the message arrived from (connection remote peer)
   * @param {Message} msg - decoded protobuf message carrying a `register` payload
   * @returns {Promise} resolves to the response Message to send back
   */
  return async function register (peerId, msg) {
    try {
      const namespace = msg.register.ns

      // Validate namespace: must be non-empty and within the configured cap
      if (!namespace || namespace.length > rendezvousPoint._maxNsLength) {
        log.error(`invalid namespace received: ${namespace}`)

        return {
          type: MESSAGE_TYPE.REGISTER_RESPONSE,
          registerResponse: {
            status: RESPONSE_STATUS.E_INVALID_NAMESPACE,
            statusText: `invalid namespace received: "${namespace}". It should be smaller than ${rendezvousPoint._maxNsLength}`
          }
        }
      }

      // Validate ttl: the protocol expresses ttl in seconds, the server
      // works in milliseconds, hence the conversion before range-checking
      const ttl = msg.register.ttl * 1e3 // convert to ms
      if (!ttl || ttl < rendezvousPoint._minTtl || ttl > rendezvousPoint._maxTtl) {
        log.error(`invalid ttl received: ${ttl}`)

        return {
          type: MESSAGE_TYPE.REGISTER_RESPONSE,
          registerResponse: {
            status: RESPONSE_STATUS.E_INVALID_TTL,
            statusText: `invalid ttl received: "${ttl}". It should be bigger than ${rendezvousPoint._minTtl} and smaller than ${rendezvousPoint._maxTtl}`
          }
        }
      }

      // Now check how many registrations we have for this peer
      // simple limit to defend against trivial DoS attacks
      // example: a peer connects and keeps registering until it fills our memory
      const peerRegistrations = await rendezvousPoint.getNumberOfRegistrationsFromPeer(peerId)
      if (peerRegistrations >= rendezvousPoint._maxPeerRegistrations) {
        log.error('unauthorized peer to register, too many registrations')

        return {
          type: MESSAGE_TYPE.REGISTER_RESPONSE,
          registerResponse: {
            status: RESPONSE_STATUS.E_NOT_AUTHORIZED
          }
        }
      }

      log(`register ${peerId.toB58String()}: trying register on ${namespace} by ${ttl} ms`)

      // Open and verify envelope signature; throws on a bad signature,
      // which lands in the catch below and yields E_INTERNAL_ERROR
      const envelope = await Envelope.openAndCertify(msg.register.signedPeerRecord, PeerRecord.DOMAIN)

      // Validate auth: the signed record must belong to the peer that sent
      // the message, so a peer cannot register on behalf of another
      if (!envelope.peerId.equals(peerId.toBytes())) {
        log.error('unauthorized peer id to register')

        return {
          type: MESSAGE_TYPE.REGISTER_RESPONSE,
          registerResponse: {
            status: RESPONSE_STATUS.E_NOT_AUTHORIZED
          }
        }
      }

      // Add registration (ttl stored in ms)
      await rendezvousPoint.addRegistration(
        namespace,
        peerId,
        msg.register.signedPeerRecord,
        ttl
      )

      // Echo the ttl back in protocol units (seconds)
      return {
        type: MESSAGE_TYPE.REGISTER_RESPONSE,
        registerResponse: {
          status: RESPONSE_STATUS.OK,
          ttl: msg.register.ttl
        }
      }
    } catch (err) {
      log.error(err)
    }

    // Reached only when the try block threw: report an internal error
    return {
      type: MESSAGE_TYPE.REGISTER_RESPONSE,
      registerResponse: {
        status: RESPONSE_STATUS.E_INTERNAL_ERROR
      }
    }
  }
}
'use strict'

const debug = require('debug')
const log = Object.assign(debug('libp2p:rendezvous-server:rpc'), {
  error: debug('libp2p:rendezvous-server:rpc:err')
})

const { pipe } = require('it-pipe')
const lp = require('it-length-prefixed')
const { toBuffer } = require('it-buffer')

const handlers = require('./handlers')
const { Message } = require('../proto')

/**
 * Factory for the rendezvous protocol stream handler.
 *
 * @param {import('..')} rendezvous - rendezvous point instance passed to the per-type handlers
 */
module.exports = (rendezvous) => {
  const getMessageHandler = handlers(rendezvous)

  /**
   * Process incoming Rendezvous messages by dispatching on message type.
   *
   * @param {import('peer-id')} peerId
   * @param {Message} msg
   * @returns {Promise | undefined} the handler's response promise, or
   * undefined when no handler is registered for the message type
   */
  function handleMessage (peerId, msg) {
    const handler = getMessageHandler(msg.type)

    if (!handler) {
      log.error(`no handler found for message type: ${msg.type}`)
      return
    }

    return handler(peerId, msg)
  }

  /**
   * Handle incoming streams on the rendezvous protocol.
   *
   * Messages are exchanged as length-prefixed protobufs: each inbound frame
   * is decoded, dispatched, and — only when the handler produces a response
   * (e.g. unregister does not) — a length-prefixed reply is written back on
   * the same stream.
   *
   * @param {Object} props
   * @param {DuplexStream} props.stream
   * @param {Connection} props.connection - connection the stream arrived on
   * @returns {Promise} resolves once the stream has been fully consumed
   */
  return async function onIncomingStream ({ stream, connection }) {
    const peerId = connection.remotePeer

    log('incoming stream from: %s', peerId.toB58String())

    await pipe(
      stream.source,
      lp.decode(),
      toBuffer,
      source => (async function * () {
        for await (const msg of source) {
          // handle the message; responses are yielded downstream for encoding
          const desMessage = Message.decode(msg)
          const res = await handleMessage(peerId, desMessage)

          if (res) {
            yield Message.encode(res)
          }
        }
      })(),
      lp.encode(),
      stream.sink
    )
  }
}
log(err) - this.storeRPC(rpc) - }) - }) - } - - stop () { - clearInterval(this.gcIntv) - // TODO: clear vars, shutdown conns, etc. - this.node.unhandle('/rendezvous/1.0.0') - } - - storeRPC (rpc) { - // TODO: should a peer that's connected twice be overriden or rejected? - this.table.RPC[rpc.id] = rpc - // TODO: remove on disconnect - } - - getNS (name, create) { - if (!this.table.NS[name]) { - if (create) { - return (this.table.NS[name] = this.store.create(name)) - } else { - return this._stubNS - } - } - return this.table.NS[name] - } - - gc () { - Object.keys(this.table.NS).forEach(ns => { - const n = this.table.NS[ns] - const removed = n.gc() - if (n.useless) { - log('drop NS %s because it is empty', n.name) - delete this.table.NS[ns] - } else { - if (removed) n.update() - } - }) - } -} - -module.exports = Server diff --git a/src/server/queue.js b/src/server/queue.js deleted file mode 100644 index f12b5c6..0000000 --- a/src/server/queue.js +++ /dev/null @@ -1,33 +0,0 @@ -'use strict' - -const debug = require('debug') -const log = debug('libp2p:rendezvous:queue') - -class AsyncQueue { - constructor () { - this.tasks = [] - this.taskIds = {} - this.triggered = false - } - add (name, fnc) { - if (this.taskIds[name]) return - log('queueing %s', name) - this.taskIds[name] = true - this.tasks.push(fnc) - this.trigger() - } - trigger () { - if (this.triggered) return - this.triggered = true - setTimeout(() => { - log('exec') - this.tasks.forEach(f => f()) - this.tasks = [] - this.taskIds = {} - this.triggered = false - log('exec done') - }, 100).unref() - } -} - -module.exports = AsyncQueue diff --git a/src/server/rpc.js b/src/server/rpc.js deleted file mode 100644 index fd49d2d..0000000 --- a/src/server/rpc.js +++ /dev/null @@ -1,159 +0,0 @@ -'use strict' - -const pull = require('pull-stream') -const ppb = require('pull-protocol-buffers') -const {Message, MessageType, ResponseStatus} = require('../proto') -const Pushable = require('pull-pushable') -const debug = 
require('debug') -const log = debug('libp2p-rendezvous:server:rpc') -const Peer = require('peer-info') -const Id = require('peer-id') - -const MAX_NS_LENGTH = 255 // TODO: spec this -const MAX_LIMIT = 1000 // TODO: spec this - -const registerErrors = { - 100: 'Invalid namespace provided', - 101: 'Invalid peer-info provided', - 102: 'Invalid TTL provided', - 103: 'Invalid cookie provided', - 200: 'Not authorized', - 300: 'Internal Server Error' -} - -const craftStatus = (status) => { - return { - status, - statusText: registerErrors[status] - } -} - -class RPC { - constructor (main) { - this.main = main - this.source = Pushable() - } - sink (read) { - const next = (end, msg, doend) => { - if (doend) { - log('crash@%s: %s', this.id, doend) - return read(doend, next) - } - if (end) { - this.online = false - log('end@%s: %s', this.id, end) - this.source.end() - return - } - switch (msg.type) { - case MessageType.REGISTER: - try { - log('register@%s: trying register on %s', this.id, msg.register.ns) - if (msg.register.peer.id && new Id(msg.register.peer.id).toB58String() !== this.id) { - log('register@%s: auth err (want %s)', this.id, new Id(msg.register.peer.id).toB58String()) - this.source.push({ - type: MessageType.REGISTER_RESPONSE, - registerResponse: craftStatus(ResponseStatus.E_NOT_AUTHORIZED) - }) - return read(null, next) - } else if (!msg.register.peer.id) { - msg.register.peer.id = this.pi.id.toBytes() - } - if (msg.register.ns > MAX_NS_LENGTH) { - log('register@%s: ns err', this.id) - this.source.push({ - type: MessageType.REGISTER_RESPONSE, - registerResponse: craftStatus(ResponseStatus.E_INVALID_NAMESPACE) - }) - return read(null, next) - } - const pi = new Peer(new Id(msg.register.peer.id)) - msg.register.peer.addrs.forEach(a => pi.multiaddrs.add(a)) - this.main.getNS(msg.register.ns, true).addPeer(pi, Date.now(), msg.register.ttl, () => this.online) - log('register@%s: ok', this.id) - this.source.push({ - type: MessageType.REGISTER_RESPONSE, - 
registerResponse: craftStatus(ResponseStatus.OK) - }) - } catch (e) { - log('register@%s: internal error', this.id) - log(e) - this.source.push({ - type: MessageType.REGISTER_RESPONSE, - registerResponse: craftStatus(ResponseStatus.E_INTERNAL_ERROR) - }) - return read(null, next) - } - break - case MessageType.UNREGISTER: - try { - log('unregister@%s: unregister from %s', this.id, msg.unregister.ns) - // TODO: currently ignores id since there is no ownership error. change? - this.main.getNS(msg.unregister.ns).removePeer(this.id) - } catch (e) { - return next(null, null, e) - } - break - case MessageType.DISCOVER: - try { - // TODO: add more errors - log('discover@%s: discover on %s', this.id, msg.discover.ns) - if (msg.discover.limit <= 0 || msg.discover.limit > MAX_LIMIT) msg.discover.limit = MAX_LIMIT - const {peers, cookie} = this.main.getNS(msg.discover.ns).getPeers(msg.discover.cookie || Buffer.from(''), msg.discover.limit, this.id) - log('discover@%s: got %s peers', this.id, peers.length) - this.source.push({ - type: MessageType.DISCOVER_RESPONSE, - discoverResponse: { - registrations: peers.map(p => { - return { - ns: msg.discover.ns, - peer: { - id: p.pi.id.toBytes(), - addrs: p.pi.multiaddrs.toArray().map(a => a.buffer) - }, - ttl: p.ttl - } - }), - cookie - } - }) - } catch (e) { - log('discover@%s: internal error', this.id) - log(e) - this.source.push({ - type: MessageType.DISCOVER_RESPONSE, - registerResponse: craftStatus(ResponseStatus.E_INTERNAL_ERROR) - }) - return read(null, next) - } - break - // case MessageType.REGISTER_RESPONSE: - // case MessageType.DISCOVER_RESPONSE: - default: // should that disconnect or just get ignored? 
- log('error@%s: sent wrong msg type %s', this.id, msg.type) - return next(null, null, true) - } - read(null, next) - } - read(null, next) - } - setup (conn, cb) { - conn.getPeerInfo((err, pi) => { - if (err) return cb(err) - this.pi = pi - this.id = pi.id.toB58String() - pull( - conn, - ppb.decode(Message), - this, - ppb.encode(Message), - conn - ) - - this.online = true - cb() - }) - } -} - -module.exports = RPC diff --git a/src/server/store/basic/index.js b/src/server/store/basic/index.js deleted file mode 100644 index 0095090..0000000 --- a/src/server/store/basic/index.js +++ /dev/null @@ -1,60 +0,0 @@ -'use strict' - -class NS { - constructor (name, que) { // name is a utf8 string - this.name = name - this.hexName = Buffer.from(name).toString('hex') // needed to prevent queue-dos attacks - this.que = que - this.id = {} - this.sorted = [] - } - addPeer (pi, ts, ttl, isOnline) { // isOnline returns a bool if the rpc connection still exists - const id = pi.id.toB58String() - this.id[id] = {pi, ts, ttl} - if (ttl) { - let expireAt = ts + ttl * 1000 - this.id[id].online = () => Date.now() >= expireAt - } else { - this.id[id].online = isOnline - } - this.update() - } - removePeer (pid) { - delete this.id[pid] - this.update() - } - update () { - this.que.add('sort@' + this.hexName, () => { - this.sorted = Object.keys(this.id).map(id => { return {id, ts: this.id[id].ts} }).sort((a, b) => a.ts - b.ts) - }) - } - getPeers (cookie, limit, ownId) { - cookie = cookie.length ? 
parseInt(cookie.toString(), 10) : 0 - let p = this.sorted.filter(p => p.ts > cookie && p.id !== ownId).slice(0, limit).map(p => this.id[p.id]) - let newCookie - if (p.length) { - newCookie = Buffer.from(p[p.length - 1].ts.toString()) - } else { - newCookie = Buffer.from('') - } - return {cookie: newCookie, peers: p} - } - gc () { - return Object.keys(this.id).filter(k => !this.id[k].online()).map(k => delete this.id[k]).length - } - get useless () { - return !Object.keys(this.id).length - } -} - -class BasicStore { - constructor (main) { - this.main = main - } - create (name) { - return new NS(name, this.main.que) - } -} - -module.exports = BasicStore -module.exports.NS = NS diff --git a/src/utils.js b/src/utils.js new file mode 100644 index 0000000..becdf70 --- /dev/null +++ b/src/utils.js @@ -0,0 +1,68 @@ +'use strict' + +function getExtraParams (alias1, alias2) { + const params = [] + + const flagIndex = process.argv.findIndex((e) => e === alias1 || e === alias2) + const tmpEndIndex = process.argv.slice(flagIndex + 1).findIndex((e) => e.startsWith('--')) + const endIndex = tmpEndIndex !== -1 ? 
tmpEndIndex : process.argv.length - flagIndex - 1 + + for (let i = flagIndex + 1; i < flagIndex + endIndex; i++) { + params.push(process.argv[i + 1]) + } + + return params +} + +function getAnnounceAddresses (argv) { + let announceAddresses = [] + const argvAddr = argv.announceMultiaddrs || argv.am + + if (argvAddr) { + announceAddresses = [argvAddr] + + const extraParams = getExtraParams('--announceMultiaddrs', '--am') + extraParams.forEach((p) => announceAddresses.push(p)) + } else if (process.env.ANNOUNCE_MULTIADDRS) { + announceAddresses = process.env.ANNOUNCE_MULTIADDRS.split(',') + } + + return announceAddresses +} + +function getListenAddresses (argv) { + let listenAddresses = ['/ip4/127.0.0.1/tcp/15003/ws', '/ip4/127.0.0.1/tcp/8000'] + const argvAddr = argv.listenMultiaddrs || argv.lm + + if (argvAddr) { + listenAddresses = [argvAddr] + + const extraParams = getExtraParams('--listenMultiaddrs', '--lm') + extraParams.forEach((p) => listenAddresses.push(p)) + } else if (process.env.LISTEN_MULTIADDRS) { + listenAddresses = process.env.LISTEN_MULTIADDRS.split(',') + } + + return listenAddresses +} + +/** + * Nullish coalescing operator implementation + * + * @template T + * @param {any} value + * @param {T} d - default value + * @returns {T} + */ +function fallbackNullish (value, d) { + if (value === null || value === undefined) { + return d + } + return value +} + +module.exports = { + getAnnounceAddresses, + getListenAddresses, + fallbackNullish +} diff --git a/test/client.id.json b/test/client.id.json deleted file mode 100644 index 97b1858..0000000 --- a/test/client.id.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "id": "QmVMx9YqSRYB75sGcYiTVtCpygxcfaxcuSJMBYoBadgJ7r", - "privKey": 
"CAASqQkwggSlAgEAAoIBAQCZPiah1KCGIIsMDXvxxk3djZfgCpckUDOAsG83FNwLx+3Z8Lg1LLAoArtula6/4LOaTaRZA9LKiSBw4yEgTMlinw77hxg6SLoHeMHi1AS/0MxCQuKxWZaeM5dtFkiUU/qJVwhTksIjmHtm/gWcBjmObAnRzeHIOLdlBL+6tcELYKH4OKcxD/VWMxFBbo5bjnPTddeQpSEtTVzsqX4kC4sBIHO3otEY8z9nefRXP8zIZ3TpfWcXMAhzbF7aJWHlUkDSblDCH41JlDXenvcerTlPN0Oqdj+8e5914qSTdSPAHbFyiGKeDc55thYZI2jDpNksSOZ2/HhDOjmNAE970VrRAgMBAAECggEBAJVnQdTvb521JruWfevHkezaemMFEDxoMP5bheKm5K5buuqLxZyaOBiaKVD0gE40bgaXgg8DKkUqkkVdO9O46XLMbpgOKzHP7AcS1b0nRoYYtLw5Z7jPBoiw9gZ1/kcW5SF3h/erEroPlOhh6ugmLYFMlfpGBsXlfe/wRFltkItcohvQKm45tDowmRNX7Gw/qE2m2Bcu8nF34OWNqSlhi/LccZWMHYXemy/MyYPeM2pMsebyEOKQb7cTKPUS7UqOYotvq8p8b/IRRi0vJZNtnON3qQDZhP3uPzZYFErWcrL3BIEgwMBNcJb2yNIvrc7LnH8xO0l7fLuYmJW1su8mQf0CgYEA5BlzcW24UPTvZdPjWHAGZBwK8RLHKxh615jfTjO4SI9ggdNVIOAWFQSKAyJT4hLyvg+4yR21WpZMj2pR3loO+Nn7djviB8a8SlWeqPSjpN/QT9nSl6JVcdoNp4vEyu+9O2MbNJp5oCZ/q6k9DovvZuxMc/2cNWo/YR4K6xcvehcCgYEAq/ywujkJ9fptpkJ5HzO7zt5sy+dodT2EDPTCeMQeLNXTC+P31th2FIl4/FzGrLJzVtWmP3kUS0es8hrRuLBy+Zg51czMliX8eJa36Vk305EAynkfIcGEzgmdbDVx6QyIWk1Y18WzVyuiVnLMbxT6ZfNXahwLNnV7umk0OH1bK1cCgYEAo9GTk7dVVO9UsDFJak6qiGOLiDAQUuc18nmchzGl/JbcnOEGlqHZuiaUaEPTMt6g79eiwu5PPUwMmEOnoKXVcuw7KWNApo0Y1dpAJN/uV49WsMKj+Lth2m7ct6QuJgGgSnKXK2R2TYrYzpSxgS0HN0gmcHeIJOS1uC43cTgppOkCgYAFTGSZaBZxeISWQaf/mRVpGxsY8Qkby4hc6dFv7QLM+M1mqWBCQyroGRAcHjOUsG6zNyPHAtDoPM4MK11Ypj70h4cImiWXXpY3lNUXoEMDBo2Sr0aRQKf5vPwXkFHxDwzIU2ewRgvvXI3EwgagSXIpX+TKhRCnXdkw9frA3sPHQwKBgQC4mCqbD3DhjiQFPOhhlZtHgruKs6b6L7npyfc4ZrDVwGFrOcbBQbuLD2f92wbB0FKWozx4FP0nlawigVHimMWG4qyJycDMBptfsUrqvxzRagmci+ZRY+cjyz2sA1Tox4nE4vVEz5ZxBGdmkRLY+hWRZMWDJrYLoZSBEKj0B336yw==", - "pubKey": 
"CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCZPiah1KCGIIsMDXvxxk3djZfgCpckUDOAsG83FNwLx+3Z8Lg1LLAoArtula6/4LOaTaRZA9LKiSBw4yEgTMlinw77hxg6SLoHeMHi1AS/0MxCQuKxWZaeM5dtFkiUU/qJVwhTksIjmHtm/gWcBjmObAnRzeHIOLdlBL+6tcELYKH4OKcxD/VWMxFBbo5bjnPTddeQpSEtTVzsqX4kC4sBIHO3otEY8z9nefRXP8zIZ3TpfWcXMAhzbF7aJWHlUkDSblDCH41JlDXenvcerTlPN0Oqdj+8e5914qSTdSPAHbFyiGKeDc55thYZI2jDpNksSOZ2/HhDOjmNAE970VrRAgMBAAE=" -} diff --git a/test/client2.id.json b/test/client2.id.json deleted file mode 100644 index 57c78fa..0000000 --- a/test/client2.id.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "id": "QmcpjE4Mgs1kc31gN6DSRDPwQqY8C2x6iGuFo3gzTxJdSQ", - "privKey": "CAASpgkwggSiAgEAAoIBAQCZ2FkxB7wjzf1H8fqWGh843E5ZtmSQpQ4DtP4HYZAgNPNc66GmP9itAR44WziPgS3BC3gfXuWE7OTtZ0dhZj2e2BTpKZwXVoAwBaV9IZrbTntFsG50rHzoyCulGvJu2RPmG1PyE9+WLXM+oiaRqY2YshLoMkS88BsRw9+PBaA2jA75Bay/wm8AsmXEc/PloAGE6PiyM6nD/66WGpxScZ2Z1BO0MUy0MUBBCvA+D8fxTpzLqBGHrIJxIK+MebAkWNFWrWyvdnapABCxvL5pSjjjGb1SyUkeTr8Bn/IsFfnjjuZaszKTqYsNGZCxlDJdrL2rlEoP4MIEP2wM+WHYqFIBAgMBAAECggEAZV4mNqYwEy9xCeyo/iosFF0kyvvg+2Wl/E9PajGgs3fwOnOPyWkcLbIk5WFFvViSezZBafovJQyqMrrwT378byNVc+RU0xPN1taBmheAX6wwkVSVEw9sJj1udJVy1BL4h4/OGh16HwvHeaeB3kxn3grHZnNo000pqOT08tn0HLvZ1396SUXjyNahMco/dTOtUz8PQAKtu+MW0ekkczGi6mBPaiR/rzlzQZRresQ368azwjTTwe1T4YwQ7buPxrW2GRfnVfU5Uvp99acKnj5bKixQtyZDiKbHWK5PX3diMfYuPdgdz0R4EOIokE0akR+lxpk4Um7+gDHbI6ufvLQSAQKBgQDX0WGxpJg6nwHspxdS75syelyRuGWoS6PPxqhQws37RLSV6aoAZoAzvhxDPT3aDaR1VVJuxnDpVjidCTArk3vHuzOMriwKLDz2NifkWnSCiBdCjpY2aqbF+sQgwLAf+5RV8xQOkc2+4p0xXYV+21XXWLN8R2gezB7exXyrvH4CkQKBgQC2fSHOcvCM9ZmyLc99bfGJ0NbTC0s9G0EkFWni9V61vfQYxME4Mi1XPRwOrCP/dguATL/QRMD24xvZHYR1+vwY+A3MPZdOtrgeVdjia9VuZ8VLEdgFTFWOY9d3XlbLiiRSj0G8IjC+WEN5k5mWJn7XA+HdkQhuY+NyJ1nI5q4wcQKBgEUxqHTgJL6GxIMvf1bj44pnmM5PpKg0uCyhsM1T596rxIpcBFlkg64TQdR9ChujTBsiY++ISCNHtZcDnyIZgxIifwCXxx7r2A/IhTm9lqVTJMH+HUMNJrNLFx65KL7YVlLIQKH7NVACMAvnxClMAVWt5r3t1wAoyaz6/GHDaVNBAoGAJVtoWELfS3vbgsYt+5dOItBFqd5eAJxbsW9Qxc1FHh9MoOVmSIK9FWbFH5vNorYflJwhiBkLB39mbAPG4gAHK3VcHbteBhcRieQ5CeDZSEil8
sAsYKlHumZl7WG6kuAsn1oEMucs40peRb0Za8tlm86HpjvSZga8wNmdX6sZbYECgYArG+pWYWjjXi1ithkCaLiL1816KPBqwXOjPHjXFuUSpbU+O7lBlEhMD6Fcj8IuxQzvbwn8L7TZRyZtN5xYBlPAblAU5PTgMKU1mQiqt0IyfifnyG0As+fbTcmGMAmd089UGBp/OLblgzoXfDhhMSo/ymrfjFYOQr88mPnbRagGWA==", - "pubKey": "CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCZ2FkxB7wjzf1H8fqWGh843E5ZtmSQpQ4DtP4HYZAgNPNc66GmP9itAR44WziPgS3BC3gfXuWE7OTtZ0dhZj2e2BTpKZwXVoAwBaV9IZrbTntFsG50rHzoyCulGvJu2RPmG1PyE9+WLXM+oiaRqY2YshLoMkS88BsRw9+PBaA2jA75Bay/wm8AsmXEc/PloAGE6PiyM6nD/66WGpxScZ2Z1BO0MUy0MUBBCvA+D8fxTpzLqBGHrIJxIK+MebAkWNFWrWyvdnapABCxvL5pSjjjGb1SyUkeTr8Bn/IsFfnjjuZaszKTqYsNGZCxlDJdrL2rlEoP4MIEP2wM+WHYqFIBAgMBAAE=" -} diff --git a/test/discovery.spec.js b/test/discovery.spec.js deleted file mode 100644 index 687b40f..0000000 --- a/test/discovery.spec.js +++ /dev/null @@ -1,68 +0,0 @@ -'use strict' - -/* eslint-env mocha */ - -const {parallel} = require('async') -const Utils = require('./utils') - -const chai = require('chai') -const dirtyChai = require('dirty-chai') -const expect = chai.expect -chai.use(dirtyChai) - -describe('discovery', () => { - let client - let client2 - let server - - before(done => { - Utils.default((err, _client, _server, _client2) => { - if (err) return done(err) - client = _client - client2 = _client2 - server = _server - parallel([client, client2].map(c => cb => c._dial(server.node.peerInfo, cb)), done) - }) - }) - - it('register', done => { - parallel( - [client, client2].map(c => cb => c.register('hello', c.swarm.peerInfo, cb)), - (...a) => setTimeout(() => done(...a), 100) // Queue is being processed every 100ms - ) - }) - - it('discover', done => { - client.discover('hello', (err, res) => { - if (err) return done(err) - expect(err).to.not.exist() - expect(res.peers).to.have.lengthOf(1) - expect(res.peers[0].id.toB58String()).to.equal(client2.swarm.peerInfo.id.toB58String()) - done() - }) - }) - - it('unregister', done => { - client2.unregister('hello') - setTimeout(() => done(), 100) // Queue is being 
'use strict'
/* eslint-env mocha */

const { expect } = require('aegir/utils/chai')

const { pipe } = require('it-pipe')
const lp = require('it-length-prefixed')
const { collect } = require('streaming-iterables')
const { toBuffer } = require('it-buffer')

const multiaddr = require('multiaddr')
const Libp2p = require('libp2p')

const RendezvousServer = require('../src')
const {
  PROTOCOL_MULTICODEC
} = require('../src/constants')
const { Message } = require('../src/proto')
const MESSAGE_TYPE = Message.MessageType
const RESPONSE_STATUS = Message.ResponseStatus

const {
  createPeerId,
  createDatastore,
  defaultLibp2pConfig
} = require('./utils')

const { MULTIADDRS_WEBSOCKETS } = require('./fixtures/browser')
const relayAddr = MULTIADDRS_WEBSOCKETS[0]

// Exercises the per-peer registration quota: the server is configured with
// maxPeerRegistrations of 1, so a second REGISTER from the same peer must be
// refused with E_NOT_AUTHORIZED.
describe('DoS attack protection', () => {
  const ns = 'test-ns'
  const ttl = 7.2e6 * 1e-3 // 7.2e6 ms expressed in seconds (protocol ttl unit)

  let datastore
  let rServer
  let client
  let peerId
  let multiaddrServer

  // Create client and server and connect them (both listen via the relay)
  beforeEach(async () => {
    [peerId] = await createPeerId()

    datastore = createDatastore()
    rServer = new RendezvousServer({
      peerId: peerId,
      addresses: {
        listen: [`${relayAddr}/p2p-circuit`]
      },
      ...defaultLibp2pConfig
    }, { maxPeerRegistrations: 1, datastore }) // Maximum of one registration

    multiaddrServer = multiaddr(`${relayAddr}/p2p-circuit/p2p/${peerId.toB58String()}`)

    client = await Libp2p.create({
      addresses: {
        listen: [`${relayAddr}/p2p-circuit`]
      },
      ...defaultLibp2pConfig
    })

    await Promise.all([rServer, client].map((n) => n.start()))
  })

  afterEach(async () => {
    await datastore.reset()
    await Promise.all([rServer, client].map((n) => n.stop()))
  })

  it('can register a namespace', async () => {
    const conn = await client.dial(multiaddrServer)
    const { stream } = await conn.newStream(PROTOCOL_MULTICODEC)

    // Send two identical REGISTER messages over one stream; responses come
    // back length-prefixed in the same order
    const responses = await pipe(
      [
        Message.encode({
          type: MESSAGE_TYPE.REGISTER,
          register: {
            signedPeerRecord: client.peerStore.addressBook.getRawEnvelope(client.peerId),
            ns,
            ttl
          }
        }),
        Message.encode({
          type: MESSAGE_TYPE.REGISTER,
          register: {
            signedPeerRecord: client.peerStore.addressBook.getRawEnvelope(client.peerId),
            ns,
            ttl
          }
        })
      ],
      lp.encode(),
      stream,
      lp.decode(),
      toBuffer,
      collect
    )

    // Second response must reject the over-quota registration
    const recMessage = Message.decode(responses[1])
    expect(recMessage).to.exist()
    expect(recMessage.type).to.eql(MESSAGE_TYPE.REGISTER_RESPONSE)
    expect(recMessage.registerResponse.status).to.eql(RESPONSE_STATUS.E_NOT_AUTHORIZED)

    // Only one record
    const { registrations } = await rServer.getRegistrations(ns)
    expect(registrations).to.have.lengthOf(1)
  })
})
'QmNMMAqSxPetRS1cVMmutW5BCN1qQQyEr4u98kUvZjcfEw', + privKey: 'CAASpQkwggShAgEAAoIBAQDPek2aeHMa0blL42RTKd6xgtkk4Zkldvq4LHxzcag5uXepiQzWANEUvoD3KcUTmMRmx14PvsxdLCNst7S2JSa0R2n5wSRs14zGy6892lx4H4tLBD1KSpQlJ6vabYM1CJhIQRG90BtzDPrJ/X1iJ2HA0PPDz0Mflam2QUMDDrU0IuV2m7gSCJ5r4EmMs3U0xnH/1gShkVx4ir0WUdoWf5KQUJOmLn1clTRHYPv4KL9A/E38+imNAXfkH3c2T7DrCcYRkZSpK+WecjMsH1dCX15hhhggNqfp3iulO1tGPxHjm7PDGTPUjpCWKpD5e50sLqsUwexac1ja6ktMfszIR+FPAgMBAAECggEAB2H2uPRoRCAKU+T3gO4QeoiJaYKNjIO7UCplE0aMEeHDnEjAKC1HQ1G0DRdzZ8sb0fxuIGlNpFMZv5iZ2ZFg2zFfV//DaAwTek9tIOpQOAYHUtgHxkj5FIlg2BjlflGb+ZY3J2XsVB+2HNHkUEXOeKn2wpTxcoJE07NmywkO8Zfr1OL5oPxOPlRN1gI4ffYH2LbfaQVtRhwONR2+fs5ISfubk5iKso6BX4moMYkxubYwZbpucvKKi/rIjUA3SK86wdCUnno1KbDfdXSgCiUlvxt/IbRFXFURQoTV6BOi3sP5crBLw8OiVubMr9/8WE6KzJ0R7hPd5+eeWvYiYnWj4QKBgQD6jRlAFo/MgPO5NZ/HRAk6LUG+fdEWexA+GGV7CwJI61W/Dpbn9ZswPDhRJKo3rquyDFVZPdd7+RlXYg1wpmp1k54z++L1srsgj72vlg4I8wkZ4YLBg0+zVgHlQ0kxnp16DvQdOgiRFvMUUMEgetsoIx1CQWTd67hTExGsW+WAZQKBgQDT/WaHWvwyq9oaZ8G7F/tfeuXvNTk3HIJdfbWGgRXB7lJ7Gf6FsX4x7PeERfL5a67JLV6JdiLLVuYC2CBhipqLqC2DB962aKMvxobQpSljBBZvZyqP1IGPoKskrSo+2mqpYkeCLbDMuJ1nujgMP7gqVjabs2zj6ACKmmpYH/oNowJ/T0ZVtvFsjkg+1VsiMupUARRQuPUWMwa9HOibM1NIZcoQV2NGXB5Z++kR6JqxQO0DZlKArrviclderUdY+UuuY4VRiSEprpPeoW7ZlbTku/Ap8QZpWNEzZorQDro7bnfBW91fX9/81ets/gCPGrfEn+58U3pdb9oleCOQc/ifpQKBgBTYGbi9bYbd9vgZs6bd2M2um+VFanbMytS+g5bSIn2LHXkVOT2UEkB+eGf9KML1n54QY/dIMmukA8HL1oNAyalpw+/aWj+9Ui5kauUhGEywHjSeBEVYM9UXizxz+m9rsoktLLLUI0o97NxCJzitG0Kub3gn0FEogsUeIc7AdinZAoGBANnM1vcteSQDs7x94TDEnvvqwSkA2UWyLidD2jXgE0PG4V6tTkK//QPBmC9eq6TIqXkzYlsErSw4XeKO91knFofmdBzzVh/ddgx/NufJV4tXF+a2iTpqYBUJiz9wpIKgf43/Ob+P1EA99GAhSdxz1ess9O2aTqf3ANzn6v6g62Pv', + pubKey: 
'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPek2aeHMa0blL42RTKd6xgtkk4Zkldvq4LHxzcag5uXepiQzWANEUvoD3KcUTmMRmx14PvsxdLCNst7S2JSa0R2n5wSRs14zGy6892lx4H4tLBD1KSpQlJ6vabYM1CJhIQRG90BtzDPrJ/X1iJ2HA0PPDz0Mflam2QUMDDrU0IuV2m7gSCJ5r4EmMs3U0xnH/1gShkVx4ir0WUdoWf5KQUJOmLn1clTRHYPv4KL9A/E38+imNAXfkH3c2T7DrCcYRkZSpK+WecjMsH1dCX15hhhggNqfp3iulO1tGPxHjm7PDGTPUjpCWKpD5e50sLqsUwexac1ja6ktMfszIR+FPAgMBAAE=' +}, { + id: 'QmW8rAgaaA6sRydK1k6vonShQME47aDxaFidbtMevWs73t', + privKey: 'CAASpwkwggSjAgEAAoIBAQCTU3gVDv3SRXLOsFln9GEf1nJ/uCEDhOG10eC0H9l9IPpVxjuPT1ep+ykFUdvefq3D3q+W3hbmiHm81o8dYv26RxZIEioToUWp7Ec5M2B/niYoE93za9/ZDwJdl7eh2hNKwAdxTmdbXUPjkIU4vLyHKRFbJIn9X8w9djldz8hoUvC1BK4L1XrT6F2l0ruJXErH2ZwI1youfSzo87TdXIoFKdrQLuW6hOtDCGKTiS+ab/DkMODc6zl8N47Oczv7vjzoWOJMUJs1Pg0ZsD1zmISY38P0y/QyEhatZn0B8BmSWxlLQuukatzOepQI6k+HtfyAAjn4UEqnMaXTP1uwLldVAgMBAAECggEAHq2f8MqpYjLiAFZKl9IUs3uFZkEiZsgx9BmbMAb91Aec+WWJG4OLHrNVTG1KWp+IcaQablEa9bBvoToQnS7y5OpOon1d066egg7Ymfmv24NEMM5KRpktCNcOSA0CySpPIB6yrg6EiUr3ixiaFUGABKkxmwgVz/Q15IqM0ZMmCUsC174PMAz1COFZxD0ZX0zgHblOJQW3dc0X3XSzhht8vU02SMoVObQHQfeXEHv3K/RiVj/Ax0bTc5JVkT8dm8xksTtsFCNOzRBqFS6MYqX6U/u0Onz3Jm5Jt7fLWb5n97gZR4SleyGrqxYNb46d9X7mP0ie7E6bzFW0DsWBIeAqVQKBgQDW0We2L1n44yOvJaMs3evpj0nps13jWidt2I3RlZXjWzWHiYQfvhWUWqps/xZBnAYgnN/38xbKzHZeRNhrqOo+VB0WK1IYl0lZVE4l6TNKCsLsUfQzsb1pePkd1eRZA+TSqsi+I/IOQlQU7HA0bMrah/5FYyUBP0jYvCOvYTlZuwKBgQCvkcVRydVlzjUgv7lY5lYvT8IHV5iYO4Qkk2q6Wjv9VUKAJZauurMdiy05PboWfs5kbETdwFybXMBcknIvZO4ihxmwL8mcoNwDVZHI4bXapIKMTCyHgUKvJ9SeTcKGC7ZuQJ8mslRmYox/HloTOXEJgQgPRxXcwa3amzvdZI+6LwKBgQCLsnQqgxKUi0m6bdR2qf7vzTH4258z6X34rjpT0F5AEyF1edVFOz0XU/q+lQhpNEi7zqjLuvbYfSyA026WXKuwSsz7jMJ/oWqev/duKgAjp2npesY/E9gkjfobD+zGgoS9BzkyhXe1FCdP0A6L2S/1+zg88WOwMvJxl6/xLl24XwKBgCm60xSajX8yIQyUpWBM9yUtpueJ2Xotgz4ST+bVNbcEAddll8gWFiaqgug9FLLuFu5lkYTHiPtgc1RNdphvO+62/9MRuLDixwh/2TPO+iNqwKDKJjda8Nei9vVddCPaOtU/xNQ0xLzFJbG9LBmvqH9izOCcu8SJwGHaTcNUeJj/AoGADCJ26cY30c13F/8awAAmFYpZWCuTP5ppTsRmjd63ixlrqgkeLGpJ7kYb5fXkcTycRGYgP0e1kssBGcmE7DuG955fx3ZJESX3GQZ
+XfMHvYGONwF1EiK1f0p6+GReC2VlQ7PIkoD9o0hojM6SnWvv9EXNjCPALEbfPFFvcniKVsE=', + pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCTU3gVDv3SRXLOsFln9GEf1nJ/uCEDhOG10eC0H9l9IPpVxjuPT1ep+ykFUdvefq3D3q+W3hbmiHm81o8dYv26RxZIEioToUWp7Ec5M2B/niYoE93za9/ZDwJdl7eh2hNKwAdxTmdbXUPjkIU4vLyHKRFbJIn9X8w9djldz8hoUvC1BK4L1XrT6F2l0ruJXErH2ZwI1youfSzo87TdXIoFKdrQLuW6hOtDCGKTiS+ab/DkMODc6zl8N47Oczv7vjzoWOJMUJs1Pg0ZsD1zmISY38P0y/QyEhatZn0B8BmSWxlLQuukatzOepQI6k+HtfyAAjn4UEqnMaXTP1uwLldVAgMBAAE=' +}, { + id: 'QmZqCdSzgpsmB3Qweb9s4fojAoqELWzqku21UVrqtVSKi4', + privKey: 'CAASpgkwggSiAgEAAoIBAQCdbSEsTmw7lp5HagRcx57DaLiSUEkh4iBcKc7Y+jHICEIA8NIVi9FlfGEZj9G21FpiTR4Cy+BLVEuf8Nm90bym4iV+cSumeS21fvD8xGTEbeKGljs6OYHy3M45JhWF85gqHQJOqZufI2NRDuRgMZEO2+qGEXmSlv9mMXba/+9ecze8nSpB7bG2Z2pnKDeYwhF9Cz+ElMyn7TBWDjJERGVgFbTpdM3rBnbhB/TGpvs732QqZmIBlxnDb/Jn0l1gNZCgkEDcJ/0NDMBJTQ8vbvcdmaw3eaMPLkn1ix4wdu9QWCA0IBtuY1R7vSUtf4irnLJG7DnAw2GfM5QrF3xF1GLXAgMBAAECggEAQ1N0qHoxl5pmvqv8iaFlqLSUmx5y6GbI6CGJMQpvV9kQQU68yjItr3VuIXx8d/CBZyEMAK4oko7OeOyMcr3MLKLy3gyQWnXgsopDjhZ/8fH8uwps8g2+IZuFJrO+6LaxEPGvFu06fOiphPUVfn40R2KN/iBjGeox+AaXijmCqaV2vEdNJJPpMfz6VKZBDLTrbiqvo/3GN1U99PUqfPWpOWR29oAhh/Au6blSqvqTUPXB2+D/X6e1JXv31mxMPK68atDHSUjZWKB9lE4FMK1bkSKJRbyXmNIlbZ9V8X4/0r8/6T7JnW7ZT8ugRkquohmwgG7KkDXB1YsOCKXYUqzVYQKBgQDtnopFXWYl7XUyePJ/2MA5i7eoko9jmF44L31irqmHc5unNf6JlNBjlxTNx3WyfzhUzrn3c18psnGkqtow0tkBj5hmqn8/WaPbc5UA/5R1FNaNf8W5khn7MDm6KtYRPjN9djqTDiVHyC6ljONYd+5S+MqyKVWZ3t/xvG60sw85qwKBgQCpmpDtL+2JBwkfeUr3LyDcQxvbfzcv8lXj2otopWxWiLiZF1HzcqgAa2CIwu9kCGEt9Zr+9E4uINbe1To0b01/FhvR6xKO/ukceGA/mBB3vsKDcRmvpBUp+3SmnhY0nOk+ArQl4DhJ34k8pDM3EDPrixPf8SfVdU/8IM32lsdHhQKBgHLgpvCKCwxjFLnmBzcPzz8C8TOqR3BbBZIcQ34l+wflOGdKj1hsfaLoM8KYn6pAHzfBCd88A9Hg11hI0VuxVACRL5jS7NnvuGwsIOluppNEE8Ys86aXn7/0vLPoab3EWJhbRE48FIHzobmft3nZ4XpzlWs02JGfUp1IAC2UM9QpAoGAeWy3pZhSr2/iEC5+hUmwdQF2yEbj8+fDpkWo2VrVnX506uXPPkQwE1zM2Bz31t5I9OaJ+U5fSpcoPpDaAwBMs1fYwwlRWB8YNdHY1q6/23svN3uZsC4BGPV2JnO34iMUudilsRg+NGVdk5TbNejbwx7nM8Urh59djFzQGGMKeSECgYA0QMCARPpdMY50M
f2xQaCP7HfMJhESSPaBq9V3xY6ToEOEnXgAR5pNjnU85wnspHp+82r5XrKfEQlFxGpj2YA4DRRmn239sjDa29qP42UNAFg1+C3OvXTht1d5oOabaGhU0udwKmkEKUbb0bG5xPQJ5qeSJ5T1gLzLk3SIP0GlSw==', + pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCdbSEsTmw7lp5HagRcx57DaLiSUEkh4iBcKc7Y+jHICEIA8NIVi9FlfGEZj9G21FpiTR4Cy+BLVEuf8Nm90bym4iV+cSumeS21fvD8xGTEbeKGljs6OYHy3M45JhWF85gqHQJOqZufI2NRDuRgMZEO2+qGEXmSlv9mMXba/+9ecze8nSpB7bG2Z2pnKDeYwhF9Cz+ElMyn7TBWDjJERGVgFbTpdM3rBnbhB/TGpvs732QqZmIBlxnDb/Jn0l1gNZCgkEDcJ/0NDMBJTQ8vbvcdmaw3eaMPLkn1ix4wdu9QWCA0IBtuY1R7vSUtf4irnLJG7DnAw2GfM5QrF3xF1GLXAgMBAAE=' +}, { + id: 'QmR5VwgsL7jyfZHAGyp66tguVrQhCRQuRc3NokocsCZ3fA', + privKey: 'CAASpwkwggSjAgEAAoIBAQCGXYU+uc2nn1zuJhfdFOl34upztnrD1gpHu58ousgHdGlGgYgbqLBAvIAauXdEL0+e30HofjA634SQxE+9nV+0FQBam1DDzHQlXsuwHV+2SKvSDkk4bVllMFpu2SJtts6VH+OXC/2ANJOm+eTALykQPYXgLIBxrhp/eD+Jz5r6wW2nq3k6OmYyK/4pgGzFjo5UyX+fa/171AJ68UPboFpDy6BZCcUjS0ondxPvD7cv5jMNqqMKIB/7rpi8n+Q3oeccRqVL56wH+FE3/QLjwYHwY6ILNRyvNXRqHjwBEXB2R5moXN0AFUWTw9rt3KhFiEjR1U81BTw5/xS7W2Iu0FgZAgMBAAECggEAS64HK8JZfE09eYGJNWPe8ECmD1C7quw21BpwVe+GVPSTizvQHswPohbKDMNj0srXDMPxCnNw1OgqcaOwyjsGuZaOoXoTroTM8nOHRIX27+PUqzaStS6aCG2IsiCozKUHjGTuupftS7XRaF4eIsUtWtFcQ1ytZ9pJYHypRQTi5NMSrTze5ThjnWxtHilK7gnBXik+aR0mYEVfSn13czQEC4rMOs+b9RAc/iibDNoLopfIdvmCCvfxzmySnR7Cu1iSUAONkir7PB+2Mt/qRFCH6P+jMamtCgQ8AmifXgVmDUlun+4MnKg3KrPd6ZjOEKhVe9mCHtGozk65RDREShfDdQKBgQDi+x2MuRa9peEMOHnOyXTS+v+MFcfmG0InsO08rFNBKZChLB+c9UHBdIvexpfBHigSyERfuDye4z6lxi8ZnierWMYJP30nxmrnxwTGTk1MQquhfs1A0kpmDnPsjlOS/drEIEIssNx2WbfJ7YtMxLWBtp+BJzGpQmr0LKC+NHRSrwKBgQCXiy2kJESIUkIs2ihV55hhT6/bZo1B1O5DPA2nkjOBXqXF6fvijzMDX82JjLd07lQZlI0n1Q/Hw0p4iYi9YVd2bLkLXF5UIb2qOeHj76enVFOrPHUSkC9Y2g/0Xs+60Ths2xRd8RrrfQU3kl5iVpBywkCIrb2M5+wRnNTk1W3TtwKBgQCvplyrteAfSurpJhs9JzE8w/hWU9SqAZYkWQp91W1oE95Um2yrbjBAoQxMjaqKS+f/APPIjy56Vqj4aHGyhW11b/Fw3qzfxvCcBKtxOs8eoMlo5FO6QgJJEA4tlcafDcvp0nzjUMqK28safLU7503+33B35fjMXxWdd5u9FaKfCQKBgC4W6j6tuRosymuRvgrCcRnHfpify/5loEFallyMnpWOD6Tt0OnK25z/GifnYDRz96gAAh5HMpFy18dpLOlMHamqz2yhHx8/U8vd5tH
IJZlCkF/X91M5/uxrBccwvsT2tM6Got8fYSyVzWxlW8dUxIHiinYHQUsFjkqdBDLEpq5pAoGASoTw5RBEWFM0GuAZdXsyNyxU+4S+grkTS7WdW/Ymkukh+bJZbnvF9a6MkSehqXnknthmufonds2AFNS//63gixENsoOhzT5+2cdfc6tJECvJ9xXVXkf85AoQ6T/RrXF0W4m9yQyCngNJUrKUOIH3oDIfdZITlYzOC3u1ojj7VuQ=', + pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCGXYU+uc2nn1zuJhfdFOl34upztnrD1gpHu58ousgHdGlGgYgbqLBAvIAauXdEL0+e30HofjA634SQxE+9nV+0FQBam1DDzHQlXsuwHV+2SKvSDkk4bVllMFpu2SJtts6VH+OXC/2ANJOm+eTALykQPYXgLIBxrhp/eD+Jz5r6wW2nq3k6OmYyK/4pgGzFjo5UyX+fa/171AJ68UPboFpDy6BZCcUjS0ondxPvD7cv5jMNqqMKIB/7rpi8n+Q3oeccRqVL56wH+FE3/QLjwYHwY6ILNRyvNXRqHjwBEXB2R5moXN0AFUWTw9rt3KhFiEjR1U81BTw5/xS7W2Iu0FgZAgMBAAE=' +}, { + id: 'QmScLDqRg7H6ipCYxm9fVk152UWavQFKscTdoT4YNHxgqp', + privKey: 'CAASpwkwggSjAgEAAoIBAQCWEHaTZ6LBLFP5OPrUqjDM/cF4b2zrfh1Zm3kd02ZtgQB3iYtZqRPJT5ctT3A7WdVF/7dCxPGOCkJlLekTx4Y4gD8JtjA+EfN9fR/2RBKbti2N3CD4vkGp9ss4hbBFcXIhl8zuD/ELHutbV6b8b4QXJGnxfp/B+1kNPnyd7SJznS0QyvI8OLI1nAkVKdYLDRW8kPKeHyx1xhdNDuTQVTFyAjRGQ4e3UYFB7bYIHW3E6kCtCoJDlj+JPC02Yt1LHzIzZVLvPvNFnYY2mag6OiGFuh/oMBIqvnPc1zRZ3eLUqeGZjQVaoR0kdgZUKz7Q2TBeNldxK/s6XO0DnkQTlelNAgMBAAECggEAdmt1dyswR2p4tdIeNpY7Pnj9JNIhTNDPznefI0dArCdBvBMhkVaYk6MoNIxcj6l7YOrDroAF8sXr0TZimMY6B/pERKCt/z1hPWTxRQBBAvnHhwvwRPq2jK6BfhAZoyM8IoBNKowP9mum5QUNdGV4Al8s73KyFX0IsCfgZSvNpRdlt+DzPh+hu/CyoZaMpRchJc1UmK8Fyk3KfO+m0DZNfHP5P08lXNfM6MZLgTJVVgERHyG+vBOzTd2RElMe19nVCzHwb3dPPRZSQ7Fnz3rA+GeLqsM2Zi4HNhfbD1OcD9C4wDj5tYL6hWTkdz4IlfVcjCeUHxgIOhdDV2K+OwbuAQKBgQD0FjUZ09UW2FQ/fitbvIB5f1SkXWPxTF9l6mAeuXhoGv2EtQUO4vq/PK6N08RjrZdWQy6UsqHgffi7lVQ8o3hvCKdbtf4sP+cM92OrY0WZV89os79ndj4tyvmnP8WojwRjt/2XEfgdoWcgWxW9DiYINTOQVimZX+X/3on4s8hEgQKBgQCdY3kOMbyQeLTRkqHXjVTY4ddO+v4S4wOUa1l4rTqAbq1W3JYWwoDQgFuIu3limIHmjnSJpCD4EioXFsM7p6csenoc20sHxsaHnJ6Mn5Te41UYmY9EW0otkQ0C3KbXM0hwQkjyplnEmZawGKmjEHW8DJ3vRYTv9TUCgYKxDHgOzQKBgB4A/NYH7BG61eBYKgxEx6YnuMfbkwV+Vdu5S8d7FQn3B2LgvZZu4FPRqcNVXLbEB+5ao8czjiKCWaj1Wj15+rvrXGcxn+Tglg5J+r5+nXeUC7LbJZQaPNp0MOwWMr3dlrSLUWjYlJ9Pz9VyXOG4c4Rexc/gR4zK9QLW4C7qKpwBAoGAZzyUb0cYlPtYQA+as
TU3bnvVKy1f8yuNcZFowst+EDiI4u0WVh+HNzy6zdmLKa03p+/RaWeLaK0hhrubnEnAUmCUMNF3ScaM+u804LDcicc8TkKLwx7ObU0z56isl4RAA8K27tNHFrpYKXJD834cfBkaj5ReOrfw6Y/iFhhDuBECgYEA8gbC76uz7LSHhW30DSRTcqOzTyoe2oYKQaxuxYNp7vSSOkcdRen+mrdflDvud2q/zN2QdL4pgqdldHlR35M/lJ0f0B6zp74jlzbO9700wzsOqreezGc5eWiroDL100U9uIZ50BKb8CKtixIHpinUSPIUcVDkSAZ2y7mbfCxQwqQ=', + pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCWEHaTZ6LBLFP5OPrUqjDM/cF4b2zrfh1Zm3kd02ZtgQB3iYtZqRPJT5ctT3A7WdVF/7dCxPGOCkJlLekTx4Y4gD8JtjA+EfN9fR/2RBKbti2N3CD4vkGp9ss4hbBFcXIhl8zuD/ELHutbV6b8b4QXJGnxfp/B+1kNPnyd7SJznS0QyvI8OLI1nAkVKdYLDRW8kPKeHyx1xhdNDuTQVTFyAjRGQ4e3UYFB7bYIHW3E6kCtCoJDlj+JPC02Yt1LHzIzZVLvPvNFnYY2mag6OiGFuh/oMBIqvnPc1zRZ3eLUqeGZjQVaoR0kdgZUKz7Q2TBeNldxK/s6XO0DnkQTlelNAgMBAAE=' +}, { + id: 'QmckxVrJw1Yo8LqvmDJNUmdAsKtSbiKWmrXJFyKmUraBoN', + privKey: 'CAASpwkwggSjAgEAAoIBAQC1/GFud/7xutux7qRfMj1sIdMRh99/chR6HqVj6LQqrgk4jil0mdN/LCk/tqPqmDtObHdmEhCoybzuhLbCKgUqryKDwO6yBJHSKWY9QqrKZtLJ37SgKwGjE3+NUD4r1dJHhtQrICFdOdSCBzs/v8gi+J+KZLHo7+Nms4z09ysy7qZh94Pd7cW4gmSMergqUeANLD9C0ERw1NXolswOW7Bi7UGr7yuBxejICLO3nkxe0OtpQBrYrqdCD9vs3t/HQZbPWVoiRj4VO7fxkAPKLl30HzcIfxj/ayg8NHcH59d08D+N2v5Sdh28gsiYKIPE9CXvuw//HUY2WVRY5fDC5JglAgMBAAECggEBAKb5aN/1w3pBqz/HqRMbQpYLNuD33M3PexBNPAy+P0iFpDo63bh5Rz+A4lvuFNmzUX70MFz7qENlzi6+n/zolxMB29YtWBUH8k904rTEjXXl//NviQgITZk106tx+4k2x5gPEm57LYGfBOdFAUzNhzDnE2LkXwRNzkS161f7zKwOEsaGWRscj6UvhO4MIFxjb32CVwt5eK4yOVqtyMs9u30K4Og+AZYTlhtm+bHg6ndCCBO6CQurCQ3jD6YOkT+L3MotKqt1kORpvzIB0ujZRf49Um8wlcjC5G9aexBeGriXaVdPF62zm7GA7RMsbQM/6aRbA1fEQXvJhHUNF9UFeaECgYEA8wCjKqQA7UQnHjRwTsktdwG6szfxd7z+5MTqHHTWhWzgcQLgdh5/dO/zanEoOThadMk5C1Bqjq96gH2xim8dg5XQofSVtV3Ui0dDa+XRB3E3fyY4D3RF5hHv85O0GcvQc6DIb+Ja1oOhvHowFB1C+CT3yEgwzX/EK9xpe+KtYAkCgYEAv7hCnj/DcZFU3fAfS+unBLuVoVJT/drxv66P686s7J8UM6tW+39yDBZ1IcwY9vHFepBvxY2fFfEeLI02QFM+lZXVhNGzFkP90agNHK01psGgrmIufl9zAo8WOKgkLgbYbSHzkkDeqyjEPU+B0QSsZOCE+qLCHSdsnTmo/TjQhj0CgYAz1+j3yfGgrS+jVBC53lXi0+2fGspbf2jqKdDArXSvFqFzuudki/EpY6AND4NDYfB6hguzjD6PnoSGMUrVfAtR7X6LbwEZpqEX7eZGeMt
1yQPMDr1bHrVi9mS5FMQR1NfuM1lP9Xzn00GIUpE7WVrWUhzDEBPJY/7YVLf0hFH08QKBgDWBRQZJIVBmkNrHktRrVddaSq4U/d/Q5LrsCrpymYwH8WliHgpeTQPWmKXwAd+ZJdXIzYjCt202N4eTeVqGYOb6Q/anV2WVYBbM4avpIxoA28kPGY6nML+8EyWIt2ApBOmgGgvtEreNzwaVU9NzjHEyv6n7FlVwlT1jxCe3XWq5AoGASYPKQoPeDlW+NmRG7z9EJXJRPVtmLL40fmGgtju9QIjLnjuK8XaczjAWT+ySI93Whu+Eujf2Uj7Q+NfUjvAEzJgwzuOd3jlQvoALq11kuaxlNQTn7rx0A1QhBgUJE8AkvShPC9FEnA4j/CLJU0re9H/8VvyN6qE0Mho0+YbjpP8=', + pubKey: 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1/GFud/7xutux7qRfMj1sIdMRh99/chR6HqVj6LQqrgk4jil0mdN/LCk/tqPqmDtObHdmEhCoybzuhLbCKgUqryKDwO6yBJHSKWY9QqrKZtLJ37SgKwGjE3+NUD4r1dJHhtQrICFdOdSCBzs/v8gi+J+KZLHo7+Nms4z09ysy7qZh94Pd7cW4gmSMergqUeANLD9C0ERw1NXolswOW7Bi7UGr7yuBxejICLO3nkxe0OtpQBrYrqdCD9vs3t/HQZbPWVoiRj4VO7fxkAPKLl30HzcIfxj/ayg8NHcH59d08D+N2v5Sdh28gsiYKIPE9CXvuw//HUY2WVRY5fDC5JglAgMBAAE=' +}] diff --git a/test/protocol.spec.js b/test/protocol.spec.js new file mode 100644 index 0000000..95b10ed --- /dev/null +++ b/test/protocol.spec.js @@ -0,0 +1,250 @@ +'use strict' +/* eslint-env mocha */ + +const { expect } = require('aegir/utils/chai') + +const { pipe } = require('it-pipe') +const lp = require('it-length-prefixed') +const { collect } = require('streaming-iterables') +const { toBuffer } = require('it-buffer') + +const multiaddr = require('multiaddr') +const PeerId = require('peer-id') +const Libp2p = require('libp2p') + +const RendezvousServer = require('../src') +const { + PROTOCOL_MULTICODEC +} = require('../src/constants') +const { Message } = require('../src/proto') +const MESSAGE_TYPE = Message.MessageType +const RESPONSE_STATUS = Message.ResponseStatus + +const { + createPeerId, + createDatastore, + defaultLibp2pConfig +} = require('./utils') + +const { MULTIADDRS_WEBSOCKETS } = require('./fixtures/browser') +const relayAddr = MULTIADDRS_WEBSOCKETS[0] + +describe('protocol', () => { + const ns = 'test-ns' + const ttl = 7.2e6 * 1e-3 + + let datastore + let rServer + let client + let peerIds + let multiaddrServer + + 
before(async () => { + peerIds = await createPeerId({ number: 4 }) + }) + + // Create client and server and connect them + beforeEach(async function () { + this.timeout(10e3) + + datastore = createDatastore() + rServer = new RendezvousServer({ + peerId: peerIds[0], + addresses: { + listen: [`${relayAddr}/p2p-circuit`] + }, + ...defaultLibp2pConfig + }, { datastore }) + multiaddrServer = multiaddr(`${relayAddr}/p2p-circuit/p2p/${peerIds[0].toB58String()}`) + + client = await Libp2p.create({ + addresses: { + listen: [`${relayAddr}/p2p-circuit`] + }, + ...defaultLibp2pConfig + }) + + await Promise.all([rServer, client].map((n) => n.start())) + }) + + afterEach(async function () { + this.timeout(10e3) + await datastore.reset() + await Promise.all([rServer, client].map((n) => n.stop())) + }) + + it('can register a namespace', async () => { + const conn = await client.dial(multiaddrServer) + const { stream } = await conn.newStream(PROTOCOL_MULTICODEC) + + const [response] = await pipe( + [Message.encode({ + type: MESSAGE_TYPE.REGISTER, + register: { + signedPeerRecord: client.peerStore.addressBook.getRawEnvelope(client.peerId), + ns, + ttl + } + })], + lp.encode(), + stream, + lp.decode(), + toBuffer, + collect + ) + + const recMessage = Message.decode(response) + expect(recMessage).to.exist() + expect(recMessage.type).to.eql(MESSAGE_TYPE.REGISTER_RESPONSE) + expect(recMessage.registerResponse.status).to.eql(Message.ResponseStatus.OK) + }) + + it('fails to register if invalid namespace', async () => { + const conn = await client.dial(multiaddrServer) + const { stream } = await conn.newStream(PROTOCOL_MULTICODEC) + + const [response] = await pipe( + [Message.encode({ + type: MESSAGE_TYPE.REGISTER, + register: { + signedPeerRecord: client.peerStore.addressBook.getRawEnvelope(client.peerId), + ns: 'x'.repeat(300), + ttl + } + })], + lp.encode(), + stream, + lp.decode(), + toBuffer, + collect + ) + + const recMessage = Message.decode(response) + expect(recMessage).to.exist() 
+ expect(recMessage.type).to.eql(MESSAGE_TYPE.REGISTER_RESPONSE) + expect(recMessage.registerResponse.status).to.eql(RESPONSE_STATUS.E_INVALID_NAMESPACE) + }) + + it('fails to register if invalid ttl', async () => { + const conn = await client.dial(multiaddrServer) + const { stream } = await conn.newStream(PROTOCOL_MULTICODEC) + + const [response] = await pipe( + [Message.encode({ + type: MESSAGE_TYPE.REGISTER, + register: { + signedPeerRecord: client.peerStore.addressBook.getRawEnvelope(client.peerId), + ns, + ttl: 5e10 * 1e-3 + } + })], + lp.encode(), + stream, + lp.decode(), + toBuffer, + collect + ) + + const recMessage = Message.decode(response) + expect(recMessage).to.exist() + expect(recMessage.type).to.eql(MESSAGE_TYPE.REGISTER_RESPONSE) + expect(recMessage.registerResponse.status).to.eql(RESPONSE_STATUS.E_INVALID_TTL) + }) + + it('fails to register if invalid signed peer record', async () => { + const conn = await client.dial(multiaddrServer) + const { stream } = await conn.newStream(PROTOCOL_MULTICODEC) + + const [response] = await pipe( + [Message.encode({ + type: MESSAGE_TYPE.REGISTER, + register: { + signedPeerRecord: client.peerStore.addressBook.getRawEnvelope(PeerId.createFromCID(relayAddr.getPeerId())), + ns, + ttl + } + })], + lp.encode(), + stream, + lp.decode(), + toBuffer, + collect + ) + + const recMessage = Message.decode(response) + expect(recMessage).to.exist() + expect(recMessage.type).to.eql(MESSAGE_TYPE.REGISTER_RESPONSE) + expect(recMessage.registerResponse.status).to.eql(RESPONSE_STATUS.E_NOT_AUTHORIZED) + }) + + describe('with previous registrations', () => { + beforeEach(async () => { + const conn = await client.dial(multiaddrServer) + const { stream } = await conn.newStream(PROTOCOL_MULTICODEC) + + await pipe( + [Message.encode({ + type: MESSAGE_TYPE.REGISTER, + register: { + signedPeerRecord: client.peerStore.addressBook.getRawEnvelope(client.peerId), + ns, + ttl + } + })], + lp.encode(), + stream, + async (source) => { + for await 
(const _ of source) { } // eslint-disable-line + } + ) + }) + + it('can unregister a namespace', async () => { + const conn = await client.dial(multiaddrServer) + const { stream } = await conn.newStream(PROTOCOL_MULTICODEC) + + await pipe( + [Message.encode({ + type: MESSAGE_TYPE.UNREGISTER, + unregister: { + id: client.peerId.toBytes(), + ns + } + })], + lp.encode(), + stream, + async (source) => { + for await (const _ of source) { } // eslint-disable-line + } + ) + }) + + it('can discover a peer registered into a namespace', async () => { + const conn = await client.dial(multiaddrServer) + const { stream } = await conn.newStream(PROTOCOL_MULTICODEC) + + const [response] = await pipe( + [Message.encode({ + type: MESSAGE_TYPE.DISCOVER, + discover: { + ns, + limit: 50 + } + })], + lp.encode(), + stream, + lp.decode(), + toBuffer, + collect + ) + + const recMessage = Message.decode(response) + expect(recMessage).to.exist() + expect(recMessage).to.exist() + expect(recMessage.type).to.eql(MESSAGE_TYPE.DISCOVER_RESPONSE) + expect(recMessage.discoverResponse.status).to.eql(Message.ResponseStatus.OK) + expect(recMessage.discoverResponse.registrations).to.exist() + expect(recMessage.discoverResponse.registrations).to.have.lengthOf(1) + }) + }) +}) diff --git a/test/server.id.json b/test/server.id.json deleted file mode 100644 index 51bad08..0000000 --- a/test/server.id.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "id": "QmQ4eanHt2D1ye44ebGH9RB5XTrzMcZENsjdR4Zd2ELTig", - "privKey": 
"CAASqAkwggSkAgEAAoIBAQC9fPF9cVj8qtRJa57bmRfkb77ViZRG926fDQTAfzX9tICD3hiHYZLD/tb/0cr7z3Y2amZWrSyuCG3pkhEASk6bb6eND134EWH5wUPxGvWKaw1SmndlGUL8xy/EokH17ieoV1s2fGZ0V6GIeh9/z5REQ6rVNhNy3UOnEm2HcDQn9tmT5tALLbuIgvNA6otFq67/tB6PSiC4mP40kgUzO22E+n20f5HntSkWAS1WCsqL4nhRhKGToRIojpgzlEn6EkE45VkyGQvsgTxNAPGaSMHfS8+J4L8LK2nqTgQkrkUcpOAlg3gATaDHTJJyY2hiVAkAuJb8f+hi73f+cSmpbb9BAgMBAAECggEASR8P6YJ1/nrFlNeM49z+FU7x62E98OzGqWXSsZ3lbdPbzAdGm+eRRUTwHqQMmoOCcJk6iLQnC7mBAKM3IE+Mafr6Qzrs3i+HCWQFHeNzYUjSSVAGRuMqsHUE//JFVevjLdkX/7ydpMO0OAA4a4/k/TrHj6NgefDcjHpV/e/UkJ7MOsjN2QWpx2a4rvOHBDK12eNM9T99zw3MUfNDbw5BGPno9mqGN5uVP1csAZnzVLP3G9utr18veBxf06PZrdLnIakD9oMoXaNwrrQj0v1CQuVQO7tuYcm1r/SzkqGEslX8AmaOb1tiugod5n9dsShpfLrZiS91k8lfWFMgVIBw4QKBgQD4d8PF61VnbZaK4PUJXYrvgXY37tP8NkSrSZoJdpL4Y94FSdd7DHTR/CdNOyqqCXAhSgU6PC21+CqUMAxRfrvoe80vhkMNsBBQRIXUD1EFB20XOLlu+VNngMNWweDsRzwa/zkYOZViG2h8db0XYY2ST4G0CTJNniQO/LD6sa9hVQKBgQDDO3dF81njsXtME08IJ7iBVZ5MvOGSrX78nYPSqALZwWwivF8J0TY1gUJTqBDOoav61aqXWjxTRJ3/mab5DUyJzqU8Ho3KPQzoXgdT+HuppjaDBWt6IHW5eOyRQH7skGL5/EvdIpgTDIhlbDU5nVR6FPV4w6LPB3PHFtI3HC1WPQKBgQCv5zINY38R+w6SAZLYb4YV64SDMqyXKOBSl4fa3TxNZ35eJhnMPlRR+P7l+VZKDOZ6Wsn6oXIHGssiICYIZ/2mKEdqNtYv0Y6rFOfd6n4EXm6H+xukihTW+NzSBe4zuHa/8iI8mT+9tgOx4TTeYaz1gR4lFEGtm6CRj6nHwZWVBQKBgQCc/EIqU0XimyJDx/ry2c244fnKRs8zvKKxyo7nYwX3x1qGi+X35OysFWYaEriBDutVZV4pGfwMEM7jatAiz5jN7wZa0068Yl7wsjs+QD5f6jFHJaKIr3U6UIwZOD1XR7ruvPrbtCeImblLpLkfvOzixduk4dsWki1811Lt0ZB7GQKBgA+JFfb6aF+dnBub/sbogB0CGE7h1gHkZ01QlSuS2r7c9KKzyNahQhcv3BOXJaUEV3Atpkwca+P9cRtE72YgrjLeEbmKtlLVeMwO79bmLDsDm3oQlJlTowGnPmTU8QpMrtqxP41/y6t8VJvAmJiFd3Efq2Ojww7/u3IhecNd0hBo", - "pubKey": 
"CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9fPF9cVj8qtRJa57bmRfkb77ViZRG926fDQTAfzX9tICD3hiHYZLD/tb/0cr7z3Y2amZWrSyuCG3pkhEASk6bb6eND134EWH5wUPxGvWKaw1SmndlGUL8xy/EokH17ieoV1s2fGZ0V6GIeh9/z5REQ6rVNhNy3UOnEm2HcDQn9tmT5tALLbuIgvNA6otFq67/tB6PSiC4mP40kgUzO22E+n20f5HntSkWAS1WCsqL4nhRhKGToRIojpgzlEn6EkE45VkyGQvsgTxNAPGaSMHfS8+J4L8LK2nqTgQkrkUcpOAlg3gATaDHTJJyY2hiVAkAuJb8f+hi73f+cSmpbb9BAgMBAAE=" -} diff --git a/test/server.spec.js b/test/server.spec.js new file mode 100644 index 0000000..680856b --- /dev/null +++ b/test/server.spec.js @@ -0,0 +1,434 @@ +'use strict' +/* eslint-env mocha */ + +const { expect } = require('aegir/utils/chai') +const delay = require('delay') +const sinon = require('sinon') +const pRetry = require('p-retry') +const pWaitFor = require('p-wait-for') + +const multiaddr = require('multiaddr') +const Envelope = require('libp2p/src/record/envelope') +const PeerRecord = require('libp2p/src/record/peer-record') + +const RendezvousServer = require('../src') +const { codes: errCodes } = require('../src/errors') +const { + createPeerId, + createSignedPeerRecord, + createDatastore, + defaultLibp2pConfig +} = require('./utils') + +const testNamespace = 'test-namespace' +const multiaddrs = [multiaddr('/ip4/127.0.0.1/tcp/0')] + +describe('rendezvous server', () => { + const signedPeerRecords = [] + let rServer + let peerIds + let datastore + + before(async () => { + peerIds = await createPeerId({ number: 4 }) + + // Create a signed peer record per peer + for (const peerId of peerIds) { + const spr = await createSignedPeerRecord(peerId, multiaddrs) + signedPeerRecords.push(spr.marshal()) + } + + datastore = createDatastore() + }) + + afterEach(async () => { + await datastore.reset() + rServer && await rServer.stop() + sinon.reset() + }) + + it('can start a rendezvous server', async () => { + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + + await rServer.start() + }) + + it('can add registrations 
to multiple namespaces', async () => { + const otherNamespace = 'other-namespace' + + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + await rServer.start() + + // Add registration for peer 1 in test namespace + await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1000) + // Add registration for peer 1 in a different namespace + await rServer.addRegistration(otherNamespace, peerIds[1], signedPeerRecords[1], 1000) + + // Add registration for peer 2 in test namespace + await rServer.addRegistration(testNamespace, peerIds[2], signedPeerRecords[2], 1000) + + const { registrations: testNsRegistrations } = await rServer.getRegistrations(testNamespace) + expect(testNsRegistrations).to.have.lengthOf(2) + + const { registrations: otherNsRegistrations } = await rServer.getRegistrations(otherNamespace) + expect(otherNsRegistrations).to.have.lengthOf(1) + }) + + it('should be able to limit registrations to get', async () => { + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + await rServer.start() + + // Add registration for peer 1 in test namespace + await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1000) + // Add registration for peer 2 in test namespace + await rServer.addRegistration(testNamespace, peerIds[2], signedPeerRecords[2], 1000) + + let r = await rServer.getRegistrations(testNamespace, { limit: 1 }) + expect(r.registrations).to.have.lengthOf(1) + expect(r.cookie).to.exist() + + r = await rServer.getRegistrations(testNamespace) + expect(r.registrations).to.have.lengthOf(2) + expect(r.cookie).to.exist() + }) + + it('can remove registrations from a peer in a given namespace', async () => { + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + await rServer.start() + + // Add registration for peer 1 in test namespace + await 
rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1000) + // Add registration for peer 2 in test namespace + await rServer.addRegistration(testNamespace, peerIds[2], signedPeerRecords[2], 1000) + + let r = await rServer.getRegistrations(testNamespace) + expect(r.registrations).to.have.lengthOf(2) + expect(r.cookie).to.exist() + + // Remove registration for peer0 + await rServer.removeRegistration(testNamespace, peerIds[1]) + + r = await rServer.getRegistrations(testNamespace) + expect(r.registrations).to.have.lengthOf(1) + expect(r.cookie).to.exist() + }) + + it('can remove all registrations from a peer', async () => { + const otherNamespace = 'other-namespace' + + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + await rServer.start() + + // Add registration for peer 1 in test namespace + await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1000) + // Add registration for peer 1 in a different namespace + await rServer.addRegistration(otherNamespace, peerIds[1], signedPeerRecords[1], 1000) + + let r = await rServer.getRegistrations(testNamespace) + expect(r.registrations).to.have.lengthOf(1) + + let otherR = await rServer.getRegistrations(otherNamespace) + expect(otherR.registrations).to.have.lengthOf(1) + + // Remove all registrations for peer0 + await rServer.removePeerRegistrations(peerIds[1]) + + r = await rServer.getRegistrations(testNamespace) + expect(r.registrations).to.have.lengthOf(0) + + otherR = await rServer.getRegistrations(otherNamespace) + expect(otherR.registrations).to.have.lengthOf(0) + }) + + it('can attempt to remove a registration for a non existent namespace', async () => { + const otherNamespace = 'other-namespace' + + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + await rServer.start() + + await rServer.removeRegistration(otherNamespace, peerIds[1]) + }) + + it('can attempt to remove a 
registration for a non existent peer', async () => { + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + await rServer.start() + + // Add registration for peer 1 in test namespace + await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1000) + + let r = await rServer.getRegistrations(testNamespace) + expect(r.registrations).to.have.lengthOf(1) + + // Remove registration for peer0 + await rServer.removeRegistration(testNamespace, peerIds[2]) + + r = await rServer.getRegistrations(testNamespace) + expect(r.registrations).to.have.lengthOf(1) + }) + + it('only new peers should be returned if cookie given', async () => { + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + await rServer.start() + + // Add registration for peer 1 in test namespace + await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1000) + + // Get current registrations + const { cookie, registrations } = await rServer.getRegistrations(testNamespace) + expect(cookie).to.exist() + expect(registrations).to.exist() + expect(registrations).to.have.lengthOf(1) + expect(registrations[0].signedPeerRecord).to.exist() + + // Validate peer0 + const envelope = await Envelope.openAndCertify(registrations[0].signedPeerRecord, PeerRecord.DOMAIN) + expect(envelope.peerId.toString()).to.eql(peerIds[1].toString()) + + // Add registration for peer 2 in test namespace + await rServer.addRegistration(testNamespace, peerIds[2], signedPeerRecords[2], 1000) + + // Get second registration by using the cookie + const { cookie: cookie2, registrations: registrations2 } = await rServer.getRegistrations(testNamespace, { cookie }) + expect(cookie2).to.exist() + expect(cookie2).to.eql(cookie) + expect(registrations2).to.exist() + expect(registrations2).to.have.lengthOf(1) + expect(registrations2[0].signedPeerRecord).to.exist() + + // Validate peer1 + const envelope2 = await 
Envelope.openAndCertify(registrations2[0].signedPeerRecord, PeerRecord.DOMAIN) + expect(envelope2.peerId.toString()).to.eql(peerIds[2].toString()) + + // If no cookie provided, all registrations are given + const { registrations: registrations3 } = await rServer.getRegistrations(testNamespace) + expect(registrations3).to.exist() + expect(registrations3).to.have.lengthOf(2) + }) + + it('no new peers should be returned if there are not new peers since latest query', async () => { + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + await rServer.start() + + // Add registration for peer 1 in test namespace + await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1000) + + // Get current registrations + const { cookie, registrations } = await rServer.getRegistrations(testNamespace) + expect(cookie).to.exist() + expect(registrations).to.exist() + expect(registrations).to.have.lengthOf(1) + + // Get registrations with same cookie and no new registration + const { cookie: cookie2, registrations: registrations2 } = await rServer.getRegistrations(testNamespace, { cookie }) + expect(cookie2).to.exist() + expect(cookie2).to.eql(cookie) + expect(registrations2).to.exist() + expect(registrations2).to.have.lengthOf(0) + }) + + it('new data for a peer should be returned if registration updated', async () => { + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + await rServer.start() + + // Add registration for peer 1 in test namespace + await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1000) + + // Get current registrations + const { cookie, registrations } = await rServer.getRegistrations(testNamespace) + expect(cookie).to.exist() + expect(registrations).to.exist() + expect(registrations).to.have.lengthOf(1) + expect(registrations[0].signedPeerRecord).to.exist() + + // Validate peer0 + const envelope = await 
Envelope.openAndCertify(registrations[0].signedPeerRecord, PeerRecord.DOMAIN) + expect(envelope.peerId.toString()).to.eql(peerIds[1].toString()) + + // Add new registration for peer 1 in test namespace + await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1000) + + // Get registrations with same cookie and no new registration + const { cookie: cookie2, registrations: registrations2 } = await rServer.getRegistrations(testNamespace, { cookie }) + expect(cookie2).to.exist() + expect(cookie2).to.eql(cookie) + expect(registrations2).to.exist() + expect(registrations2).to.have.lengthOf(1) + expect(registrations2[0].signedPeerRecord).to.exist() + + // Validate peer0 + const envelope2 = await Envelope.openAndCertify(registrations2[0].signedPeerRecord, PeerRecord.DOMAIN) + expect(envelope2.peerId.toString()).to.eql(peerIds[1].toString()) + }) + + it('get registrations should throw if no stored cookie is provided', async () => { + const badCookie = String(Math.random() + Date.now()) + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { datastore }) + await rServer.start() + + await expect(rServer.getRegistrations(testNamespace, { cookie: badCookie })) + .to.eventually.be.rejectedWith(Error) + .and.to.have.property('code', errCodes.INVALID_COOKIE) + }) + + it('gc expired records on regular interval', async function () { + this.timeout(35e3) + + rServer = new RendezvousServer({ + ...defaultLibp2pConfig, + peerId: peerIds[0] + }, { + datastore, + gcInterval: 1000, + gcBootDelay: 1000, + gcMinInterval: 0, + gcMinRegistrations: 0 + }) + const spy = sinon.spy(rServer, '_gc') + await rServer.start() + + // Add registrations in test namespace + await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1500) + await rServer.addRegistration(testNamespace, peerIds[2], signedPeerRecords[2], 3200) + + let r = await rServer.getRegistrations(testNamespace) + expect(r.registrations).to.have.lengthOf(2) + + // 
wait for first record to be removed (2nd gc)
+    await pWaitFor(() => spy.callCount >= 2)
+
+    // wait for second record to be removed
+    await pRetry(async () => {
+      r = await rServer.getRegistrations(testNamespace)
+      expect(r.registrations).to.have.lengthOf(0)
+    })
+  })
+
+  it('gc expired records when maximum threshold', async function () {
+    this.timeout(35e3)
+
+    rServer = new RendezvousServer({
+      ...defaultLibp2pConfig,
+      peerId: peerIds[0]
+    }, {
+      datastore,
+      // gcMinInterval: 0,
+      gcMaxRegistrations: 2
+    })
+    const spy = sinon.spy(rServer, '_gc')
+    await rServer.start()
+
+    // Add registrations in test namespace
+    await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 500)
+
+    let r = await rServer.getRegistrations(testNamespace)
+    expect(r.registrations).to.have.lengthOf(1)
+
+    // Validate peer
+    let envelope = await Envelope.openAndCertify(r.registrations[0].signedPeerRecord, PeerRecord.DOMAIN)
+    expect(envelope.peerId.toString()).to.eql(peerIds[1].toString())
+
+    // Wait for previous record to be expired
+    await delay(500)
+
+    // Add registrations in test namespace exceeding the max number for gc trigger
+    await rServer.addRegistration(testNamespace, peerIds[2], signedPeerRecords[2], 3200)
+
+    await pWaitFor(() => spy.callCount === 1)
+
+    // retry as rServer._gc is async and it can be removing
+    await pRetry(async () => {
+      r = await rServer.getRegistrations(testNamespace)
+      expect(r.registrations).to.have.lengthOf(1)
+
+      envelope = await Envelope.openAndCertify(r.registrations[0].signedPeerRecord, PeerRecord.DOMAIN)
+      expect(envelope.peerId.toString()).to.eql(peerIds[2].toString())
+    })
+  })
+
+  it('gc expired records when maximum threshold only if gc min interval', async function () {
+    this.timeout(45e3)
+
+    rServer = new RendezvousServer({
+      ...defaultLibp2pConfig,
+      peerId: peerIds[0]
+    }, {
+      datastore,
+      gcMaxRegistrations: 2
+    })
+    const spy = sinon.spy(rServer, '_gc')
+    await rServer.start()
+
+    // Add registrations in test namespace
+    await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 500)
+
+    let r = await rServer.getRegistrations(testNamespace)
+    expect(r.registrations).to.have.lengthOf(1)
+
+    // Wait for previous record to be expired
+    await delay(500)
+
+    // Add registrations in test namespace exceeding the max number for gc trigger
+    await rServer.addRegistration(testNamespace, peerIds[2], signedPeerRecords[2], 3000)
+
+    // Wait for gc
+    await pWaitFor(() => spy.callCount === 1)
+
+    // retry as rServer._gc is async and it can take longer to finish
+    await pRetry(async () => {
+      r = await rServer.getRegistrations(testNamespace)
+      expect(r.registrations).to.have.lengthOf(1)
+    })
+
+    // Wait for second record to be expired
+    await delay(3000)
+
+    // Add a new registration
+    await rServer.addRegistration(testNamespace, peerIds[1], signedPeerRecords[1], 1000)
+
+    await Promise.race([
+      (async () => {
+        // GC should not be triggered, even with max registrations as minInterval was not reached
+        await pWaitFor(() => spy.callCount === 2)
+        throw new Error('should not call gc')
+      })(),
+      // It should return 0 records, even without gc, as expired records are not returned
+      pRetry(async () => {
+        r = await rServer.getRegistrations(testNamespace)
+        expect(r.registrations).to.have.lengthOf(0)
+      })
+    ])
+  })
+})
diff --git a/test/utils.js b/test/utils.js
index 177aef9..d548211 100644
--- a/test/utils.js
+++ b/test/utils.js
@@ -1,87 +1,138 @@
 'use strict'
 
+const Transport = require('libp2p-websockets')
+const Muxer = require('libp2p-mplex')
+const { NOISE: Crypto } = require('libp2p-noise')
+const PeerId = require('peer-id')
+
+const pTimes = require('p-times')
+const { isNode } = require('ipfs-utils/src/env')
+
 const Libp2p = require('libp2p')
-const TCP = require('libp2p-tcp')
-const MPLEX = require('libp2p-mplex')
-const SPDY = require('libp2p-spdy')
-const SECIO = require('libp2p-secio')
-
-const Id = require('peer-id')
-const Peer = require('peer-info')
-
-const Server = require('../src/server')
-const Client = require('../src')
-
-const Utils = module.exports = (id, addrs, cb) => {
-  Id.createFromJSON(require(id), (err, id) => {
-    if (err) return cb(err)
-    const peer = new Peer(id)
-    addrs.forEach(a => peer.multiaddrs.add(a))
-
-    const swarm = new Libp2p({
-      transport: [
-        new TCP()
-      ],
-      connection: {
-        muxer: [
-          MPLEX,
-          SPDY
-        ],
-        crypto: [SECIO]
-      }
-    }, peer, null, {
-      relay: {
-        enabled: true,
-        hop: {
-          enabled: true,
-          active: false
-        }
-      }
-    })
-
-    swarm.start(err => {
-      if (err) return cb(err)
-      cb(null, swarm)
-    })
-  })
+const multiaddr = require('multiaddr')
+const Envelope = require('libp2p/src/record/envelope')
+const PeerRecord = require('libp2p/src/record/peer-record')
+
+const RendezvousServer = require('../src')
+
+const Peers = require('./fixtures/peers')
+const { MULTIADDRS_WEBSOCKETS } = require('./fixtures/browser')
+const relayAddr = MULTIADDRS_WEBSOCKETS[0]
+
+const defaultConfig = {
+  modules: {
+    transport: [Transport],
+    streamMuxer: [Muxer],
+    connEncryption: [Crypto]
+  }
 }
-Utils.id = (id, addrs, cb) => {
-  Id.createFromJSON(require(id), (err, id) => {
-    if (err) return cb(err)
-    const peer = new Peer(id)
-    addrs.forEach(a => peer.multiaddrs.add(a))
-    cb(null, peer)
-  })
+module.exports.defaultLibp2pConfig = defaultConfig
+
+/**
+ * Create Peer Id.
+ *
+ * @param {Object} [properties]
+ * @param {number} [properties.number = 1] - number of peers.
+ * @param {boolean} [properties.fixture = true]
+ * @returns {Promise<Array<PeerId>>}
+ */
+async function createPeerId ({ number = 1, fixture = true } = {}) {
+  const peerIds = await pTimes(number, (i) => fixture
+    ? PeerId.createFromJSON(Peers[i])
+    : PeerId.create())
+
+  return peerIds
 }
-Utils.createServer = (id, addrs, opt, cb) => {
-  Utils(id, addrs, (err, swarm) => {
-    if (err) return cb(err)
-    const server = new Server(Object.assign(opt || {}, {node: swarm}))
-    server.start()
-    return cb(null, server, swarm)
-  })
+module.exports.createPeerId = createPeerId
+
+/**
+ * Create libp2p nodes.
+ *
+ * @param {Object} [properties]
+ * @param {Object} [properties.config = {}]
+ * @param {number} [properties.number = 1] - number of peers
+ * @param {boolean} [properties.started = true] - nodes should start
+ * @returns {Promise<Array<Libp2p>>}
+ */
+async function createPeer ({ number = 1, started = true, config = {} } = {}) {
+  const peerIds = await pTimes(number, (i) => PeerId.createFromJSON(Peers[i]))
+  const peers = await pTimes(number, (i) => Libp2p.create({
+    peerId: peerIds[i],
+    addresses: {
+      listen: [multiaddr(`${relayAddr}/p2p-circuit`)]
+    },
+    ...defaultConfig,
+    ...config
+  }))
+
+  if (started) {
+    await Promise.all(peers.map((p) => p.start()))
+  }
+
+  return peers
 }
-Utils.createClient = (id, addrs, cb) => {
-  Utils(id, addrs, (err, swarm) => {
-    if (err) return cb(err)
-    const client = new Client(swarm)
-    client.start(err => {
-      if (err) return cb(err)
-      return cb(null, client, swarm)
-    })
+module.exports.createPeer = createPeer
+
+/**
+ * Create rendezvous server.
+ *
+ * @param {Object} [properties]
+ * @param {Object} [properties.config = {}]
+ * @param {boolean} [properties.started = true] - node should start
+ */
+async function createRendezvousServer ({ config = {}, started = true } = {}) {
+  const [peerId] = await createPeerId({ fixture: false })
+
+  const datastore = createDatastore()
+  const rendezvous = new RendezvousServer({
+    peerId: peerId,
+    addresses: {
+      listen: [`${relayAddr}/p2p-circuit`]
+    },
+    ...defaultConfig,
+    ...config
+  }, { datastore })
+
+  if (started) {
+    await rendezvous.start()
+  }
+
+  return rendezvous
+}
+
+module.exports.createRendezvousServer = createRendezvousServer
+
+async function createSignedPeerRecord (peerId, multiaddrs) {
+  const pr = new PeerRecord({
+    peerId,
+    multiaddrs
   })
+
+  const envelope = await Envelope.seal(pr, peerId)
+
+  return envelope
 }
-Utils.default = cb => Utils.createServer('./server.id.json', ['/ip4/0.0.0.0/tcp/0'], {}, (err, server) => {
-  if (err) return cb(err)
-  Utils.createClient('./client.id.json', ['/ip4/0.0.0.0/tcp/0'], (err, client) => {
-    if (err) return cb(err)
-    Utils.createClient('./client2.id.json', ['/ip4/0.0.0.0/tcp/0'], (err, client2) => {
-      if (err) return cb(err)
-      return cb(null, client, server, client2)
-    })
+module.exports.createSignedPeerRecord = createSignedPeerRecord
+
+function createDatastore () {
+  if (!isNode) {
+    const Memory = require('../src/datastores/memory')
+    return new Memory()
+  }
+
+  const MySql = require('../src/datastores/mysql')
+  const datastore = new MySql({
+    host: 'localhost',
+    user: 'root',
+    password: 'test-secret-pw',
+    database: 'libp2p_rendezvous_db'
   })
-})
+
+  return datastore
+}
+
+module.exports.createDatastore = createDatastore
diff --git a/tsconfig.json b/tsconfig.json
new file mode 100644
index 0000000..5b9a618
--- /dev/null
+++ b/tsconfig.json
@@ -0,0 +1,9 @@
+{
+  "extends": "./node_modules/aegir/src/config/tsconfig.aegir.json",
+  "compilerOptions": {
+    "outDir": "dist"
+  },
+  "include": [
+    "src"
+  ]
+}
\ No newline at end of file