mirror of
https://github.com/fluencelabs/registry.git
synced 2025-04-25 02:02:14 +00:00
Compare commits
203 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
41e9b5ac87 | ||
|
be6ca2e4b4 | ||
|
4efd6b2645 | ||
|
586249873f | ||
|
f4829267d0 | ||
|
2181b22142 | ||
|
fed52dcd06 | ||
|
aaf7f68ce8 | ||
|
e9ba1ad248 | ||
|
a100d2f5d2 | ||
|
5c9af8bd3a | ||
|
e000e46128 | ||
|
f78212d49b | ||
|
b71b85ca1e | ||
|
7d9327bcfd | ||
|
b9bce2e764 | ||
|
c6c579479e | ||
|
3092907e5e | ||
|
01d8cb4052 | ||
|
633d8e6648 | ||
|
7ba20dcabd | ||
|
ddce821a57 | ||
|
c207f7fa54 | ||
|
863ae55f35 | ||
|
a6aeeea3f5 | ||
|
66a42f7b93 | ||
|
7b892678b1 | ||
|
64b3b4b9c9 | ||
|
c160475157 | ||
|
d77fd12b4d | ||
|
2405f41702 | ||
|
2a440a8b1f | ||
|
19f064519d | ||
|
9e5dc63fb2 | ||
|
5ea6ff36ed | ||
|
517872aa84 | ||
|
a6514ffce9 | ||
|
d9a8a20d58 | ||
|
0985109026 | ||
|
acfa42850b | ||
|
0394560c06 | ||
|
3d378b92fb | ||
|
394b5b8395 | ||
|
81f15d4eb7 | ||
|
d5c2c7c6c1 | ||
|
19f5d47add | ||
|
2259425976 | ||
|
8ff086a206 | ||
|
a7051d748b | ||
|
1f44cdc3b1 | ||
|
9b7c2807dc | ||
|
4cb1b90a95 | ||
|
0ac1b76fe1 | ||
|
d63183a7d7 | ||
|
2c29fea098 | ||
|
9b4142dc95 | ||
|
19128039df | ||
|
8901137546 | ||
|
5269c1ebf6 | ||
|
1a12a56f3e | ||
|
be441e86cb | ||
|
bb33b8cc22 | ||
|
6b47a7c423 | ||
|
0b603e7ff3 | ||
|
14d63141cb | ||
|
257516e74f | ||
|
3c0e44488f | ||
|
851dae2aa5 | ||
|
c6bdf2d55a | ||
|
ba3e66b496 | ||
|
990b588b75 | ||
|
083b16c776 | ||
|
a08ee16ff9 | ||
|
5acf1d230b | ||
|
db68f9bf45 | ||
|
ea9ff20493 | ||
|
d96f5a4a0d | ||
|
975ced7a2b | ||
|
2245f2a239 | ||
|
6f682e1f7d | ||
|
d91e24f701 | ||
|
0b73550887 | ||
|
061cf2f818 | ||
|
77dd4cdc4c | ||
|
88f9044553 | ||
|
25ee3b7ae1 | ||
|
5e460e3e24 | ||
|
db5466902b | ||
|
19942c4f30 | ||
|
990ab2be6c | ||
|
204af45000 | ||
|
46c852b771 | ||
|
e695734b88 | ||
|
e73a515649 | ||
|
70fb9b86a1 | ||
|
e66457c0ff | ||
|
d040ff0a12 | ||
|
4fd0895ab8 | ||
|
f3d4ee7c9f | ||
|
dfd714120a | ||
|
e520d80378 | ||
|
9665271331 | ||
|
40410609b3 | ||
|
b8e74443e2 | ||
|
dfe043d1df | ||
|
d2c5b69658 | ||
|
234eae986f | ||
|
d27f232fb4 | ||
|
dc3dc5e45f | ||
|
0eee1cb057 | ||
|
a4f01c5f0b | ||
|
3960180246 | ||
|
5fcdf48b66 | ||
|
f671c8ac15 | ||
|
efb9f6f400 | ||
|
122015f72d | ||
|
8d492113f1 | ||
|
f747661422 | ||
|
9b5cfbd987 | ||
|
a430c92019 | ||
|
6dba72c33c | ||
|
5104d408e8 | ||
|
51e9bb8ad4 | ||
|
201a6602ba | ||
|
451c5be23c | ||
|
9981043448 | ||
|
8c434592b1 | ||
|
269373f0ea | ||
|
0ef980bf9e | ||
|
a253ac4be9 | ||
|
d64b14d395 | ||
|
481a30b2fc | ||
|
55fdbca522 | ||
|
2d4d8e16ce | ||
|
0eefb94ce6 | ||
|
0dd66ad9f4 | ||
|
3d458d2e35 | ||
|
1b20c2634f | ||
|
754410727a | ||
|
a82a7c2500 | ||
|
70d2de3543 | ||
|
22ebd4ab09 | ||
|
494225877f | ||
|
013fde3743 | ||
|
fcbf679e71 | ||
|
c08e3f74a8 | ||
|
cf56bbb9d6 | ||
|
11d1664f83 | ||
|
a0c93db64e | ||
|
1126e01580 | ||
|
9910b74522 | ||
|
1dece8f610 | ||
|
c0265e7f27 | ||
|
6d4f16679e | ||
|
12af2cd369 | ||
|
f8b41189d0 | ||
|
849aac84b5 | ||
|
154a9287cf | ||
|
2e21cdbe39 | ||
|
b3b20d1856 | ||
|
fc313b83ab | ||
|
28fceb2836 | ||
|
1daa2449f0 | ||
|
a6817bcc55 | ||
|
011b297408 | ||
|
0577180737 | ||
|
e47e7716d1 | ||
|
07e06a98e3 | ||
|
8f8797ba9d | ||
|
16cef0ac07 | ||
|
14aba36ca8 | ||
|
57bb879435 | ||
|
9ca1f0498c | ||
|
f179336869 | ||
|
f74548c518 | ||
|
6afc4ce0e5 | ||
|
6b51f9381b | ||
|
fe030623f8 | ||
|
1b4baa05ad | ||
|
382f26e1d6 | ||
|
ca8e638dea | ||
|
a89c7c6850 | ||
|
721264846c | ||
|
f306b2b513 | ||
|
f2640cf372 | ||
|
00493147b4 | ||
|
d4fcadeba0 | ||
|
ad051252ae | ||
|
2d5fb2721e | ||
|
d332bb2935 | ||
|
cde7b4d7f5 | ||
|
fb64f428f0 | ||
|
6b320dd95f | ||
|
7671235b02 | ||
|
d0536f017d | ||
|
4489fe0ec4 | ||
|
1c5ec8754d | ||
|
f8e00527a7 | ||
|
721f60a917 | ||
|
7029436149 | ||
|
d7c62a57d9 | ||
|
78dafff714 | ||
|
6b2d699d64 |
2
.cargo/config.toml
Normal file
2
.cargo/config.toml
Normal file
@ -0,0 +1,2 @@
|
||||
[registries]
|
||||
fluence = { index = "git://crates.fluence.dev/index" }
|
@ -1,40 +0,0 @@
|
||||
version: 2.1
|
||||
|
||||
orbs:
|
||||
docker: circleci/docker@1.5.0
|
||||
|
||||
jobs:
|
||||
Build and test Rust service:
|
||||
docker:
|
||||
- image: circleci/rust:latest
|
||||
resource_class: xlarge
|
||||
environment:
|
||||
RUST_BACKTRACE: full
|
||||
steps:
|
||||
- checkout
|
||||
- run: |
|
||||
sudo bash .github/download_marine.sh
|
||||
- restore_cache:
|
||||
keys:
|
||||
- registry05-{{ checksum "service/Cargo.lock" }}
|
||||
- run: |
|
||||
cd ./service
|
||||
rustup toolchain install nightly-2022-01-16-x86_64-unknown-linux-gnu
|
||||
rustup default nightly-2022-01-16-x86_64-unknown-linux-gnu
|
||||
rustup override set nightly-2022-01-16-x86_64-unknown-linux-gnu
|
||||
rustup target add wasm32-wasi --toolchain nightly-2022-01-16-x86_64-unknown-linux-gnu
|
||||
- run: ./service/build.sh
|
||||
- run: |
|
||||
cd ./service
|
||||
cargo test --no-fail-fast --release --all-features -- --test-threads=1
|
||||
- save_cache:
|
||||
paths:
|
||||
- ~/.cargo
|
||||
- ~/.rustup
|
||||
key: registry05-{{ checksum "service/Cargo.lock" }}
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
CircleCI:
|
||||
jobs:
|
||||
- Build and test Rust service
|
3
.github/actionlint.yaml
vendored
Normal file
3
.github/actionlint.yaml
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
self-hosted-runner:
|
||||
labels:
|
||||
- builder
|
14
.github/download_marine.sh
vendored
14
.github/download_marine.sh
vendored
@ -1,14 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -o pipefail -o errexit -o nounset
|
||||
set -x
|
||||
|
||||
MARINE_RELEASE="https://api.github.com/repos/fluencelabs/marine/releases/latest"
|
||||
OUT_DIR=/usr/local/bin
|
||||
|
||||
# get metadata about release
|
||||
curl -s -H "Accept: application/vnd.github.v3+json" $MARINE_RELEASE |
|
||||
# extract url and name for asset with name "marine"
|
||||
# also append $OUT_DIR to each name so file is saved to $OUT_DIR
|
||||
jq -r ".assets | .[] | select(.name == \"marine\") | \"\(.browser_download_url) $OUT_DIR/\(.name)\"" |
|
||||
# download assets
|
||||
xargs -n2 bash -c 'curl -L $0 -o $1 && chmod +x $1'
|
134
.github/e2e/docker-compose.yml
vendored
Normal file
134
.github/e2e/docker-compose.yml
vendored
Normal file
@ -0,0 +1,134 @@
|
||||
networks:
|
||||
nox:
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 10.50.10.0/24
|
||||
|
||||
services:
|
||||
nox-1:
|
||||
image: ${NOX_IMAGE}
|
||||
ports:
|
||||
- 7771:7771
|
||||
- 9991:9991
|
||||
command:
|
||||
- --aqua-pool-size=2
|
||||
- -t=7771
|
||||
- -w=9991
|
||||
- -x=10.50.10.10
|
||||
- --external-maddrs
|
||||
- /dns4/nox-1/tcp/7771
|
||||
- /dns4/nox-1/tcp/9991/ws
|
||||
- --allow-private-ips
|
||||
- --local
|
||||
# - --bootstraps=/dns/nox-1/tcp/7771
|
||||
# 12D3KooWBM3SdXWqGaawQDGQ6JprtwswEg3FWGvGhmgmMez1vRbR
|
||||
- -k=hK62afickoeP2uZbmSkAYXxxqP8ozq16VRN7qfTP719EHC5V5tjrtW57BSjUr8GvsEXmJRbtejUWyPZ2rZMyQdq
|
||||
networks:
|
||||
nox:
|
||||
ipv4_address: 10.50.10.10
|
||||
|
||||
nox-2:
|
||||
image: ${NOX_IMAGE}
|
||||
ports:
|
||||
- 7772:7772
|
||||
- 9992:9992
|
||||
command:
|
||||
- --aqua-pool-size=2
|
||||
- -t=7772
|
||||
- -w=9992
|
||||
- -x=10.50.10.20
|
||||
- --external-maddrs
|
||||
- /dns4/nox-2/tcp/7772
|
||||
- /dns4/nox-2/tcp/9992/ws
|
||||
- --allow-private-ips
|
||||
- --bootstraps=/dns/nox-1/tcp/7771
|
||||
# 12D3KooWQdpukY3p2DhDfUfDgphAqsGu5ZUrmQ4mcHSGrRag6gQK
|
||||
- -k=2WijTVdhVRzyZamWjqPx4V4iNMrajegNMwNa2PmvPSZV6RRpo5M2fsPWdQr22HVRubuJhhSw8BrWiGt6FPhFAuXy
|
||||
networks:
|
||||
nox:
|
||||
ipv4_address: 10.50.10.20
|
||||
|
||||
nox-3:
|
||||
image: ${NOX_IMAGE}
|
||||
ports:
|
||||
- 7773:7773
|
||||
- 9993:9993
|
||||
command:
|
||||
- --aqua-pool-size=2
|
||||
- -t=7773
|
||||
- -w=9993
|
||||
- -x=10.50.10.30
|
||||
- --external-maddrs
|
||||
- /dns4/nox-3/tcp/7773
|
||||
- /dns4/nox-3/tcp/9993/ws
|
||||
- --allow-private-ips
|
||||
- --bootstraps=/dns/nox-1/tcp/7771
|
||||
# 12D3KooWRT8V5awYdEZm6aAV9HWweCEbhWd7df4wehqHZXAB7yMZ
|
||||
- -k=2n2wBVanBeu2GWtvKBdrYK9DJAocgG3PrTUXMharq6TTfxqTL4sLdXL9BF23n6rsnkAY5pR9vBtx2uWYDQAiZdrX
|
||||
networks:
|
||||
nox:
|
||||
ipv4_address: 10.50.10.30
|
||||
|
||||
nox-4:
|
||||
image: ${NOX_IMAGE}
|
||||
ports:
|
||||
- 7774:7774
|
||||
- 9994:9994
|
||||
command:
|
||||
- --aqua-pool-size=2
|
||||
- -t=7774
|
||||
- -w=9994
|
||||
- -x=10.50.10.40
|
||||
- --external-maddrs
|
||||
- /dns4/nox-4/tcp/7774
|
||||
- /dns4/nox-4/tcp/9994/ws
|
||||
- --allow-private-ips
|
||||
- --bootstraps=/dns/nox-1/tcp/7771
|
||||
# 12D3KooWBzLSu9RL7wLP6oUowzCbkCj2AGBSXkHSJKuq4wwTfwof
|
||||
- -k=4zp8ucAikkjB8CmkufYiFBW4QCDUCbQG7yMjviX7W8bMyN5rfChQ2Pi5QCWThrCTbAm9uq5nbFbxtFcNZq3De4dX
|
||||
networks:
|
||||
nox:
|
||||
ipv4_address: 10.50.10.40
|
||||
|
||||
nox-5:
|
||||
image: ${NOX_IMAGE}
|
||||
ports:
|
||||
- 7775:7775
|
||||
- 9995:9995
|
||||
command:
|
||||
- --aqua-pool-size=2
|
||||
- -t=7775
|
||||
- -w=9995
|
||||
- -x=10.50.10.50
|
||||
- --external-maddrs
|
||||
- /dns4/nox-5/tcp/7775
|
||||
- /dns4/nox-5/tcp/9995/ws
|
||||
- --allow-private-ips
|
||||
- --bootstraps=/dns/nox-1/tcp/7771
|
||||
# 12D3KooWBf6hFgrnXwHkBnwPGMysP3b1NJe5HGtAWPYfwmQ2MBiU
|
||||
- -k=3ry26rm5gkJXvdqRH4FoM3ezWq4xVVsBQF7wtKq4E4pbuaa6p1F84tNqifUS7DdfJL9hs2gcdW64Wc342vHZHMUp
|
||||
networks:
|
||||
nox:
|
||||
ipv4_address: 10.50.10.50
|
||||
|
||||
nox-6:
|
||||
image: ${NOX_IMAGE}
|
||||
ports:
|
||||
- 7776:7776
|
||||
- 9996:9996
|
||||
command:
|
||||
- --aqua-pool-size=2
|
||||
- -t=7776
|
||||
- -w=9996
|
||||
- --bootstraps=/dns/nox-1/tcp/7771
|
||||
- -x=10.50.10.60
|
||||
- --external-maddrs
|
||||
- /dns4/nox-6/tcp/7776
|
||||
- /dns4/nox-6/tcp/9996/ws
|
||||
- --allow-private-ips
|
||||
# 12D3KooWPisGn7JhooWhggndz25WM7vQ2JmA121EV8jUDQ5xMovJ
|
||||
- -k=5Qh8bB1sF28uLPwr3HTvEksCeC6mAWQvebCfcgv9y6j4qKwSzNKm2tzLUg4nACUEo2KZpBw11gNCnwaAdM7o1pEn
|
||||
networks:
|
||||
nox:
|
||||
ipv4_address: 10.50.10.60
|
12
.github/release-please/config.json
vendored
Normal file
12
.github/release-please/config.json
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
{
|
||||
"bootstrap-sha": "1126e01580c2b91674a6c016844e5b1b8c5b2b14",
|
||||
"release-type": "simple",
|
||||
"bump-minor-pre-major": true,
|
||||
"bump-patch-for-minor-pre-major": true,
|
||||
"packages": {
|
||||
".": {
|
||||
"package-name": "registry",
|
||||
"component": "registry"
|
||||
}
|
||||
}
|
||||
}
|
3
.github/release-please/manifest.json
vendored
Normal file
3
.github/release-please/manifest.json
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
{
|
||||
".": "0.9.4"
|
||||
}
|
48
.github/renovate.json
vendored
Normal file
48
.github/renovate.json
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
"extends": [
|
||||
"config:base",
|
||||
":semanticCommitTypeAll(chore)"
|
||||
],
|
||||
"enabledManagers": ["cargo", "npm", "github-actions", "pip_requirements"],
|
||||
"rangeStrategy": "pin",
|
||||
"schedule": "every weekend",
|
||||
"respectLatest": false,
|
||||
"packageRules": [
|
||||
{
|
||||
"matchManagers": ["cargo", "npm"],
|
||||
"matchPackagePatterns": [
|
||||
"@fluencelabs/.*",
|
||||
"fluence-.*",
|
||||
"marine-.*"
|
||||
],
|
||||
"semanticCommitType": "fix",
|
||||
"semanticCommitScope": "deps",
|
||||
"schedule": "at any time"
|
||||
},
|
||||
{
|
||||
"matchManagers": ["cargo"],
|
||||
"groupName": "marine things",
|
||||
"matchPackagePatterns": [
|
||||
"marine-rs-sdk",
|
||||
"marine-rs-sdk-test",
|
||||
"marine-sqlite-connector"
|
||||
],
|
||||
},
|
||||
{
|
||||
"matchDepTypes": ["devDependencies"],
|
||||
"prPriority": -1,
|
||||
"semanticCommitType": "chore",
|
||||
"semanticCommitScope": "deps"
|
||||
},
|
||||
{
|
||||
"matchUpdateTypes": ["major"],
|
||||
"prConcurrentLimit": 1
|
||||
},
|
||||
{
|
||||
"matchManagers": ["github-actions"],
|
||||
"groupName": "all github-actions",
|
||||
"prPriority": -1
|
||||
}
|
||||
]
|
||||
}
|
5
.github/workflows/changelog_config.json
vendored
5
.github/workflows/changelog_config.json
vendored
@ -1,5 +0,0 @@
|
||||
{
|
||||
"template": "${{CHANGELOG}}\n\n${{UNCATEGORIZED}}",
|
||||
"pr_template": "- #${{NUMBER}} ${{TITLE}}",
|
||||
"empty_template": "- no changes"
|
||||
}
|
72
.github/workflows/e2e.yml
vendored
Normal file
72
.github/workflows/e2e.yml
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
name: "e2e"
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- ".github/**"
|
||||
- "!.github/workflows/e2e.yml"
|
||||
- "!.github/workflows/snapshot.yml"
|
||||
- "!.github/workflows/tests.yml"
|
||||
types:
|
||||
- "labeled"
|
||||
- "synchronize"
|
||||
- "opened"
|
||||
- "reopened"
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- ".github/**"
|
||||
- "!.github/workflows/e2e.yml"
|
||||
- "!.github/workflows/snapshot.yml"
|
||||
- "!.github/workflows/tests.yml"
|
||||
|
||||
concurrency:
|
||||
group: "${{ github.workflow }}-${{ github.ref }}"
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
snapshot:
|
||||
if: >
|
||||
github.event_name == 'push' ||
|
||||
contains(github.event.pull_request.labels.*.name, 'e2e')
|
||||
name: "registry"
|
||||
uses: ./.github/workflows/snapshot.yml
|
||||
with:
|
||||
ref: ${{ github.ref }}
|
||||
|
||||
nox:
|
||||
needs:
|
||||
- snapshot
|
||||
uses: fluencelabs/nox/.github/workflows/build.yml@master
|
||||
with:
|
||||
cargo-dependencies: |
|
||||
[
|
||||
{
|
||||
"package": "registry-distro",
|
||||
"version": "=${{ needs.snapshot.outputs.cargo-version }}",
|
||||
"registry": "fluence",
|
||||
"manifest": "crates/system-services/Cargo.toml"
|
||||
}
|
||||
]
|
||||
|
||||
nox-snapshot:
|
||||
name: "nox"
|
||||
needs:
|
||||
- nox
|
||||
|
||||
uses: fluencelabs/nox/.github/workflows/container.yml@master
|
||||
with:
|
||||
image-name: "docker.fluence.dev/registry"
|
||||
|
||||
aqua-tests:
|
||||
name: "registry"
|
||||
needs:
|
||||
- nox-snapshot
|
||||
uses: ./.github/workflows/tests.yml
|
||||
with:
|
||||
ref: ${{ github.ref }}
|
||||
nox-image: "${{ needs.nox-snapshot.outputs.nox-image }}"
|
||||
if-no-artifacts-found: warn
|
21
.github/workflows/lint-pr.yml
vendored
Normal file
21
.github/workflows/lint-pr.yml
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
name: lint PR
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- synchronize
|
||||
|
||||
concurrency:
|
||||
group: "${{ github.workflow }}-${{ github.ref }}"
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
pr:
|
||||
name: Validate PR title
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: amannn/action-semantic-pull-request@v5
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
37
.github/workflows/lint.yml
vendored
Normal file
37
.github/workflows/lint.yml
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
name: lint
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- ".github/workflows/**"
|
||||
- ".github/renovate.json"
|
||||
|
||||
concurrency:
|
||||
group: "${{ github.workflow }}-${{ github.ref }}"
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
reviewdog:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Lint actions
|
||||
uses: reviewdog/action-actionlint@v1
|
||||
env:
|
||||
SHELLCHECK_OPTS: "-e SC2086 -e SC2207 -e SC2128"
|
||||
with:
|
||||
reporter: github-pr-check
|
||||
fail_on_error: true
|
||||
|
||||
renovate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Renovate Config Validator
|
||||
uses: tj-actions/renovate-config-validator@v2
|
||||
with:
|
||||
config_file: .github/renovate.json
|
334
.github/workflows/release.yml
vendored
334
.github/workflows/release.yml
vendored
@ -1,165 +1,231 @@
|
||||
name: "publish-release"
|
||||
name: "release"
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*"
|
||||
branches:
|
||||
- "main"
|
||||
|
||||
concurrency:
|
||||
group: "${{ github.workflow }}-${{ github.ref }}"
|
||||
|
||||
jobs:
|
||||
npm-publish:
|
||||
name: "Publish"
|
||||
release-please:
|
||||
runs-on: ubuntu-latest
|
||||
container: rust
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
outputs:
|
||||
release-created: ${{ steps.release.outputs['release_created'] }}
|
||||
tag-name: ${{ steps.release.outputs['tag_name'] }}
|
||||
version: ${{ steps.release.outputs['version'] }}
|
||||
pr: ${{ steps.release.outputs['pr'] }}
|
||||
|
||||
steps:
|
||||
### Setup
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
- name: Run release-please
|
||||
id: release
|
||||
uses: google-github-actions/release-please-action@v3
|
||||
with:
|
||||
token: ${{ secrets.FLUENCEBOT_RELEASE_PLEASE_PAT }}
|
||||
command: manifest
|
||||
config-file: .github/release-please/config.json
|
||||
manifest-file: .github/release-please/manifest.json
|
||||
|
||||
- name: Set env
|
||||
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
|
||||
- name: Show output from release-please
|
||||
if: steps.release.outputs.releases_created
|
||||
env:
|
||||
RELEASE_PLEASE_OUTPUT: ${{ toJSON(steps.release.outputs) }}
|
||||
run: echo "${RELEASE_PLEASE_OUTPUT}" | jq
|
||||
|
||||
- name: Download jq
|
||||
bump-version:
|
||||
if: needs.release-please.outputs.pr != null
|
||||
runs-on: builder
|
||||
needs:
|
||||
- release-please
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ fromJson(needs.release-please.outputs.pr).headBranchName }}
|
||||
|
||||
- name: Setup Rust toolchain
|
||||
uses: dsherret/rust-toolchain-file@v1
|
||||
|
||||
- name: Install cargo-edit
|
||||
uses: baptiste0928/cargo-install@v2.2.0
|
||||
with:
|
||||
crate: cargo-edit
|
||||
|
||||
- name: Get version
|
||||
id: version
|
||||
run: |
|
||||
curl -L https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 -o /usr/local/bin/jq
|
||||
chmod +x /usr/local/bin/jq
|
||||
version="$(jq -r '.[]' .github/release-please/manifest.json)"
|
||||
echo "version=${version}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Download marine
|
||||
run: bash $GITHUB_WORKSPACE/.github/download_marine.sh
|
||||
- name: Set version in service
|
||||
working-directory: service
|
||||
run: cargo set-version ${{ steps.version.outputs.version }}
|
||||
|
||||
- uses: actions/setup-java@v2
|
||||
- name: Set version in distro
|
||||
working-directory: distro
|
||||
run: cargo set-version ${{ steps.version.outputs.version }}
|
||||
|
||||
- name: Setup node
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
distribution: 'adopt'
|
||||
java-version: '11'
|
||||
node-version: "18"
|
||||
|
||||
- name: Cache npm
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ~/.npm
|
||||
key: ${{ runner.os }}-node-v03-${{ hashFiles('**/package-lock.json') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-node-v03-
|
||||
- name: Set version in aqua
|
||||
working-directory: aqua
|
||||
run: npm version ${{ steps.version.outputs.version }}
|
||||
|
||||
- uses: actions/setup-node@v2
|
||||
- name: Commit version bump
|
||||
uses: stefanzweifel/git-auto-commit-action@v4
|
||||
with:
|
||||
node-version: "15"
|
||||
commit_message: "chore: Bump registry version to ${{ steps.version.outputs.version }}"
|
||||
branch: ${{ fromJson(needs.release-please.outputs.pr).headBranchName }}
|
||||
commit_user_name: fluencebot
|
||||
commit_user_email: devops@fluence.one
|
||||
commit_author: fluencebot <devops@fluence.one>
|
||||
|
||||
registry:
|
||||
runs-on: builder
|
||||
|
||||
needs: release-please
|
||||
if: needs.release-please.outputs.release-created
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Import secrets
|
||||
uses: hashicorp/vault-action@v2.7.3
|
||||
with:
|
||||
url: https://vault.fluence.dev
|
||||
path: jwt/github
|
||||
role: ci
|
||||
method: jwt
|
||||
jwtGithubAudience: "https://github.com/fluencelabs"
|
||||
jwtTtl: 300
|
||||
exportToken: false
|
||||
secrets: |
|
||||
kv/npmjs/fluencebot token | NODE_AUTH_TOKEN
|
||||
|
||||
- name: Setup Rust toolchain
|
||||
uses: dsherret/rust-toolchain-file@v1
|
||||
|
||||
- name: Setup marine
|
||||
uses: fluencelabs/setup-marine@v1
|
||||
|
||||
- name: Setup fcli
|
||||
uses: fluencelabs/setup-fluence@v1
|
||||
with:
|
||||
version: main
|
||||
|
||||
- name: Build service
|
||||
run: ./build.sh
|
||||
working-directory: service
|
||||
|
||||
- name: Setup node
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: "18"
|
||||
registry-url: "https://registry.npmjs.org"
|
||||
cache-dependency-path: "aqua/package-lock.json"
|
||||
cache: "npm"
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v2
|
||||
- run: npm i
|
||||
working-directory: aqua
|
||||
|
||||
- run: npm run build
|
||||
working-directory: aqua
|
||||
|
||||
- name: Publish to NPM registry
|
||||
run: npm publish --access public
|
||||
working-directory: aqua
|
||||
|
||||
- name: Import secrets
|
||||
uses: hashicorp/vault-action@v2.7.3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
~/.cargo/bin
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
url: https://vault.fluence.dev
|
||||
path: jwt/github
|
||||
role: ci
|
||||
method: jwt
|
||||
jwtGithubAudience: "https://github.com/fluencelabs"
|
||||
jwtTtl: 300
|
||||
exportToken: false
|
||||
secrets: |
|
||||
kv/crates.io/fluencebot token | CARGO_REGISTRY_TOKEN
|
||||
|
||||
- name: Install Rust
|
||||
working-directory: ./service
|
||||
run: |
|
||||
rustup toolchain install nightly-2022-01-16-x86_64-unknown-linux-gnu
|
||||
rustup default nightly-2022-01-16-x86_64-unknown-linux-gnu
|
||||
rustup override set nightly-2022-01-16-x86_64-unknown-linux-gnu
|
||||
rustup target add wasm32-wasi --toolchain nightly-2022-01-16-x86_64-unknown-linux-gnu
|
||||
- name: Setup Rust toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
|
||||
### Build
|
||||
- name: Build registry.wasm
|
||||
working-directory: ./service
|
||||
- name: Install cargo-workspaces
|
||||
uses: baptiste0928/cargo-install@v2.2.0
|
||||
with:
|
||||
crate: cargo-workspaces
|
||||
|
||||
- name: Build distro
|
||||
run: ./build.sh
|
||||
|
||||
- name: Check Aqua compiles
|
||||
working-directory: ./aqua
|
||||
- name: Publish to crates.io
|
||||
working-directory: ./distro
|
||||
run: |
|
||||
npm i
|
||||
npm run build
|
||||
cargo ws publish \
|
||||
--no-git-commit \
|
||||
--allow-dirty \
|
||||
--from-git \
|
||||
--skip-published \
|
||||
--yes
|
||||
|
||||
- name: Create builtin distribution package
|
||||
slack:
|
||||
if: always()
|
||||
name: "Notify"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
needs:
|
||||
- release-please
|
||||
- registry
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- uses: lwhiteley/dependent-jobs-result-check@v1
|
||||
id: status
|
||||
with:
|
||||
statuses: failure
|
||||
dependencies: ${{ toJSON(needs) }}
|
||||
|
||||
- name: Log output
|
||||
run: |
|
||||
./builtin-package/package.sh
|
||||
echo "statuses:" "${{ steps.status.outputs.statuses }}"
|
||||
echo "jobs:" "${{ steps.status.outputs.jobs }}"
|
||||
echo "found any?:" "${{ steps.status.outputs.found }}"
|
||||
|
||||
- name: Build Changelog
|
||||
id: changelog
|
||||
uses: mikepenz/release-changelog-builder-action@v1
|
||||
- name: Import secrets
|
||||
uses: hashicorp/vault-action@v2.7.3
|
||||
with:
|
||||
configuration: ".github/workflows/changelog_config.json"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
url: https://vault.fluence.dev
|
||||
path: jwt/github
|
||||
role: ci
|
||||
method: jwt
|
||||
jwtGithubAudience: "https://github.com/fluencelabs"
|
||||
jwtTtl: 300
|
||||
exportToken: false
|
||||
secrets: |
|
||||
kv/slack/release-please webhook | SLACK_WEBHOOK_URL
|
||||
|
||||
## Publish
|
||||
- name: Release
|
||||
id: release
|
||||
uses: softprops/action-gh-release@v1
|
||||
- uses: ravsamhq/notify-slack-action@v2
|
||||
if: steps.status.outputs.found == 'true'
|
||||
with:
|
||||
name: registry ${{ env.RELEASE_VERSION }}
|
||||
tag_name: ${{ env.RELEASE_VERSION }}
|
||||
files: |
|
||||
registry.tar.gz
|
||||
body: ${{steps.changelog.outputs.changelog}}
|
||||
draft: false
|
||||
prerelease: false
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
### Publish Aqua API
|
||||
- name: Publish Aqua API
|
||||
run: |
|
||||
npm version ${{ env.RELEASE_VERSION }} --allow-same-version
|
||||
npm publish --access public
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
working-directory: ./aqua
|
||||
|
||||
## Update node-distro repo
|
||||
- name: Get tar.gz URL
|
||||
id: package-url
|
||||
uses: actions/github-script@v4
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
result-encoding: string
|
||||
script: |
|
||||
try {
|
||||
let assets = await github.repos.listReleaseAssets({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
release_id: "${{ steps.release.outputs.id }}",
|
||||
});
|
||||
console.dir(assets);
|
||||
let package = assets.data.find((a) => a.name === 'registry.tar.gz');
|
||||
let url = package.browser_download_url;
|
||||
console.log("URL: " + url);
|
||||
return url;
|
||||
} catch (e) {
|
||||
console.log("Err: " + e);
|
||||
throw e;
|
||||
}
|
||||
|
||||
- name: Calculate SHA256
|
||||
run: |
|
||||
du -hs registry.tar.gz
|
||||
echo $(sha256sum registry.tar.gz)
|
||||
echo "SHA256=$(sha256sum registry.tar.gz | awk '{ print $1 }')" >> $GITHUB_ENV
|
||||
|
||||
- name: Update version in node-distro repo
|
||||
uses: benc-uk/workflow-dispatch@v1
|
||||
with:
|
||||
workflow: update_service
|
||||
repo: fluencelabs/node-distro
|
||||
ref: 'main'
|
||||
token: ${{ secrets.PERSONAL_TOKEN }}
|
||||
inputs: '{
|
||||
"name": "registry",
|
||||
"version": "${{ env.RELEASE_VERSION }}",
|
||||
"url": "${{ steps.package-url.outputs.result }}",
|
||||
"sha256": "${{ env.SHA256 }}"
|
||||
}'
|
||||
|
||||
- name: Log notice
|
||||
uses: actions/github-script@v4
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
console.dir(core);
|
||||
core.info("registry was updated to ${{ env.RELEASE_VERSION }} in node-distro repo");
|
||||
status: "failure"
|
||||
notification_title: "*{workflow}* has {status_message}"
|
||||
message_format: "${{ steps.status.outputs.jobs }} {status_message} in <{repo_url}|{repo}>"
|
||||
footer: "<{run_url}>"
|
||||
|
87
.github/workflows/run-tests.yml
vendored
Normal file
87
.github/workflows/run-tests.yml
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
name: "test"
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- ".github/**"
|
||||
- "!.github/workflows/run-tests.yml"
|
||||
- "!.github/workflows/tests.yml"
|
||||
- "!.github/workflows/e2e.yml"
|
||||
types:
|
||||
- "labeled"
|
||||
- "synchronize"
|
||||
- "opened"
|
||||
- "reopened"
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- ".github/**"
|
||||
- "!.github/workflows/run-tests.yml"
|
||||
- "!.github/workflows/tests.yml"
|
||||
- "!.github/workflows/e2e.yml"
|
||||
|
||||
concurrency:
|
||||
group: "${{ github.workflow }}-${{ github.ref }}"
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
cargo:
|
||||
name: "registry / Run cargo tests"
|
||||
runs-on: builder
|
||||
timeout-minutes: 60
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: service
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Setup rust toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
cache: false
|
||||
|
||||
- name: Setup marine
|
||||
uses: fluencelabs/setup-marine@v1
|
||||
|
||||
- name: Build service
|
||||
run: ./build.sh
|
||||
|
||||
- name: Run cargo fmt
|
||||
run: cargo fmt --all -- --check
|
||||
|
||||
- name: Run cargo clippy
|
||||
run: cargo clippy -Z unstable-options --all -- -D warnings
|
||||
|
||||
- name: Install cargo-nextest
|
||||
uses: baptiste0928/cargo-install@v2.2.0
|
||||
with:
|
||||
crate: cargo-nextest
|
||||
version: 0.9.22
|
||||
|
||||
- name: Run cargo nextest
|
||||
env:
|
||||
NEXTEST_RETRIES: 2
|
||||
NEXTEST_TEST_THREADS: 1
|
||||
run: cargo nextest run --release --all-features --no-fail-fast
|
||||
|
||||
lints:
|
||||
name: Lints
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup rust toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
|
||||
- name: Run cargo fmt
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: fmt
|
||||
args: --all --manifest-path service/Cargo.toml -- --check
|
104
.github/workflows/snapshot.yml
vendored
Normal file
104
.github/workflows/snapshot.yml
vendored
Normal file
@ -0,0 +1,104 @@
|
||||
name: Build snapshot
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
cargo-dependencies:
|
||||
description: "Cargo dependencies map"
|
||||
type: string
|
||||
default: "null"
|
||||
ref:
|
||||
description: "git ref to checkout to"
|
||||
type: string
|
||||
default: "master"
|
||||
snapshot:
|
||||
description: "Whether to publish snapshots"
|
||||
type: boolean
|
||||
default: true
|
||||
outputs:
|
||||
cargo-version:
|
||||
description: "Cargo snapshot version"
|
||||
value: ${{ jobs.snapshot.outputs.version }}
|
||||
|
||||
jobs:
|
||||
snapshot:
|
||||
name: "Build crate"
|
||||
runs-on: builder
|
||||
timeout-minutes: 60
|
||||
|
||||
outputs:
|
||||
version: "${{ steps.snapshot.outputs.version }}"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: fluencelabs/registry
|
||||
ref: ${{ inputs.ref }}
|
||||
|
||||
- name: Setup Rust toolchain
|
||||
uses: dsherret/rust-toolchain-file@v1
|
||||
|
||||
- name: Set dependencies
|
||||
if: inputs.cargo-dependencies != 'null'
|
||||
uses: fluencelabs/github-actions/cargo-set-dependency@main
|
||||
with:
|
||||
dependencies: ${{ inputs.cargo-dependencies }}
|
||||
path: service/
|
||||
|
||||
- name: Setup marine
|
||||
uses: fluencelabs/setup-marine@v1
|
||||
with:
|
||||
artifact-name: marine
|
||||
|
||||
- name: Setup fcli
|
||||
uses: fluencelabs/setup-fluence@v1
|
||||
with:
|
||||
version: main
|
||||
|
||||
- name: Import secrets
|
||||
if: inputs.snapshot == true
|
||||
uses: hashicorp/vault-action@v2.7.3
|
||||
with:
|
||||
url: https://vault.fluence.dev
|
||||
path: jwt/github
|
||||
role: ci
|
||||
method: jwt
|
||||
jwtGithubAudience: "https://github.com/fluencelabs"
|
||||
jwtTtl: 300
|
||||
exportToken: false
|
||||
secrets: |
|
||||
kv/cargo-registry/users/ci token | CARGO_REGISTRIES_FLUENCE_TOKEN
|
||||
|
||||
- name: Setup node
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: "18"
|
||||
registry-url: "https://registry.npmjs.org"
|
||||
cache-dependency-path: "aqua/package-lock.json"
|
||||
cache: "npm"
|
||||
|
||||
- run: npm i
|
||||
working-directory: aqua
|
||||
|
||||
- name: Install cargo-workspaces
|
||||
uses: baptiste0928/cargo-install@v2.2.0
|
||||
with:
|
||||
crate: cargo-workspaces
|
||||
|
||||
- name: Generate snapshot version
|
||||
id: version
|
||||
uses: fluencelabs/github-actions/generate-snapshot-id@main
|
||||
|
||||
- name: Build distro
|
||||
run: ./build.sh
|
||||
|
||||
- name: Publish crate snapshots
|
||||
id: snapshot
|
||||
uses: fluencelabs/github-actions/cargo-publish-snapshot@main
|
||||
with:
|
||||
id: ${{ steps.version.outputs.id }}
|
||||
path: distro
|
17
.github/workflows/tag.yml
vendored
17
.github/workflows/tag.yml
vendored
@ -1,17 +0,0 @@
|
||||
name: "tag"
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
tag:
|
||||
name: "Tag"
|
||||
runs-on: "ubuntu-latest"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Bump version and push tag
|
||||
id: tag_version
|
||||
uses: mathieudutour/github-tag-action@v5.5
|
||||
with:
|
||||
github_token: ${{ secrets.PERSONAL_TOKEN }}
|
179
.github/workflows/tests.yml
vendored
Normal file
179
.github/workflows/tests.yml
vendored
Normal file
@ -0,0 +1,179 @@
|
||||
name: Run tests with workflow_call
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
fluence-env:
|
||||
description: "Fluence environment to run tests against"
|
||||
type: string
|
||||
default: "local"
|
||||
nox-image:
|
||||
description: "nox image tag"
|
||||
type: string
|
||||
default: "fluencelabs/nox:unstable"
|
||||
fcli-version:
|
||||
description: "@fluencelabs/cli version"
|
||||
type: string
|
||||
default: "main"
|
||||
if-no-artifacts-found:
|
||||
description: "What to do when no artifacts found in setup-* actions"
|
||||
type: string
|
||||
default: "error"
|
||||
cargo-dependencies:
|
||||
description: "Cargo dependencies map"
|
||||
type: string
|
||||
ref:
|
||||
description: "git ref to checkout to"
|
||||
type: string
|
||||
default: "main"
|
||||
|
||||
env:
|
||||
CI: true
|
||||
FORCE_COLOR: true
|
||||
NOX_IMAGE: "${{ inputs.nox-image }}"
|
||||
FLUENCE_ENV: "${{ inputs.fluence-env }}"
|
||||
|
||||
jobs:
|
||||
aqua:
|
||||
name: "Run aqua tests"
|
||||
runs-on: builder
|
||||
timeout-minutes: 60
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Import secrets
|
||||
uses: hashicorp/vault-action@v2.7.3
|
||||
with:
|
||||
url: https://vault.fluence.dev
|
||||
path: jwt/github
|
||||
role: ci
|
||||
method: jwt
|
||||
jwtGithubAudience: "https://github.com/fluencelabs"
|
||||
jwtTtl: 300
|
||||
secrets: |
|
||||
kv/docker-registry/basicauth/ci username | DOCKER_USERNAME ;
|
||||
kv/docker-registry/basicauth/ci password | DOCKER_PASSWORD ;
|
||||
kv/npm-registry/basicauth/ci token | NODE_AUTH_TOKEN;
|
||||
|
||||
- name: Checkout registry
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: fluencelabs/registry
|
||||
ref: ${{ inputs.ref }}
|
||||
|
||||
- name: Setup node with self-hosted registry
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: "18"
|
||||
registry-url: "https://npm.fluence.dev"
|
||||
cache: "npm"
|
||||
cache-dependency-path: "**/package-lock.json"
|
||||
|
||||
- name: Run npm install in aqua
|
||||
run: npm install
|
||||
working-directory: aqua
|
||||
|
||||
- name: Run npm install in aqua-tests
|
||||
run: npm install
|
||||
working-directory: aqua-tests
|
||||
|
||||
- name: Setup fcli
|
||||
uses: fluencelabs/setup-fluence@v1
|
||||
with:
|
||||
artifact: fcli
|
||||
version: ${{ inputs.fcli-version }}
|
||||
if-no-artifact-found: ${{ inputs.if-no-artifacts-found }}
|
||||
|
||||
- name: Setup Rust toolchain
|
||||
uses: dsherret/rust-toolchain-file@v1
|
||||
|
||||
- name: Setup marine
|
||||
uses: fluencelabs/setup-marine@v1
|
||||
with:
|
||||
artifact-name: marine
|
||||
|
||||
- name: Set dependencies
|
||||
if: inputs.cargo-dependencies != ''
|
||||
uses: fluencelabs/github-actions/cargo-set-dependency@main
|
||||
with:
|
||||
dependencies: ${{ inputs.cargo-dependencies }}
|
||||
path: service/
|
||||
|
||||
- name: Build service
|
||||
env:
|
||||
FLUENCE_USER_DIR: "${{ github.workspace }}/tmp/.fluence"
|
||||
run: ./build.sh
|
||||
working-directory: service
|
||||
|
||||
- name: Build distro
|
||||
env:
|
||||
FLUENCE_USER_DIR: "${{ github.workspace }}/tmp/.fluence"
|
||||
run: ./build.sh
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: docker.fluence.dev
|
||||
username: ${{ env.DOCKER_USERNAME }}
|
||||
password: ${{ env.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Pull nox image
|
||||
run: docker pull $NOX_IMAGE
|
||||
|
||||
- name: Run nox network
|
||||
uses: isbang/compose-action@v1.4.1
|
||||
with:
|
||||
compose-file: ".github/e2e/docker-compose.yml"
|
||||
down-flags: "--volumes"
|
||||
|
||||
- name: Setup python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.9"
|
||||
cache: "pip"
|
||||
cache-dependency-path: aqua-tests/requirements.txt
|
||||
|
||||
- name: Install python requirements
|
||||
run: pip install -r requirements.txt
|
||||
working-directory: aqua-tests
|
||||
|
||||
- name: Install fcli dependencies
|
||||
env:
|
||||
FLUENCE_USER_DIR: "${{ github.workspace }}/tmp/.fluence"
|
||||
run: fluence dep i --no-input
|
||||
working-directory: aqua-tests
|
||||
|
||||
- name: Print fcli version
|
||||
run: pytest -s test_fluence_cli_version.py
|
||||
working-directory: aqua-tests
|
||||
|
||||
- name: Run aqua tests
|
||||
env:
|
||||
FLUENCE_USER_DIR: "${{ github.workspace }}/tmp/.fluence"
|
||||
NPM_CONFIG_REGISTRY: "https://npm.fluence.dev"
|
||||
run: pytest test_aqua.py
|
||||
working-directory: aqua-tests
|
||||
|
||||
- name: Print versions to check summary
|
||||
if: always()
|
||||
working-directory: aqua-tests
|
||||
run: |
|
||||
cat <<SNAPSHOT >> $GITHUB_STEP_SUMMARY
|
||||
## Used versions
|
||||
\`\`\`
|
||||
$(fluence dep v)
|
||||
\`\`\`
|
||||
SNAPSHOT
|
||||
|
||||
- name: Dump container logs
|
||||
if: always()
|
||||
uses: jwalton/gh-docker-logs@v2
|
||||
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
rm -rf tmp ~/.fluence
|
||||
sudo rm -rf registry
|
12
.gitignore
vendored
12
.gitignore
vendored
@ -2,6 +2,9 @@ service/target
|
||||
builtin-package/*.wasm
|
||||
builtin-package/scheduled/*.air
|
||||
registry.tar.gz
|
||||
registry
|
||||
distro/target/
|
||||
distro/registry-service/
|
||||
|
||||
**/*.rs.bk
|
||||
**/.idea
|
||||
@ -9,7 +12,10 @@ registry.tar.gz
|
||||
**/.DS_Store
|
||||
**/node_modules
|
||||
**/dist
|
||||
*.drawio
|
||||
|
||||
# Remove after https://github.com/fluencelabs/aqua/issues/287
|
||||
aqua/target/typescript/**
|
||||
example/src/generated/**
|
||||
aqua/*.tgz
|
||||
examples/src/generated/**
|
||||
|
||||
**/__pycache__
|
||||
tmp
|
||||
|
1
.prettierignore
Normal file
1
.prettierignore
Normal file
@ -0,0 +1 @@
|
||||
.github
|
170
API_reference.md
Normal file
170
API_reference.md
Normal file
@ -0,0 +1,170 @@
|
||||
# API Reference
|
||||
|
||||
- [API Reference](#api-reference)
|
||||
- [Data structures](#data-structures)
|
||||
- [Key](#key)
|
||||
- [RecordMetadata](#recordmetadata)
|
||||
- [Record](#record)
|
||||
- [Tombstone](#tombstone)
|
||||
- [Resources API](#resources-api)
|
||||
- [Overview](#overview)
|
||||
- [Return types](#return-types)
|
||||
- [Methods](#methods)
|
||||
- [createResource](#createresource)
|
||||
- [getResource](#getresource)
|
||||
- [getResourceId](#getresourceid)
|
||||
- [registerService](#registerservice)
|
||||
- [unregisterService](#unregisterservice)
|
||||
- [resolveResource](#resolveresource)
|
||||
- [executeOnResource](#executeonresource)
|
||||
|
||||
## Data structures
|
||||
### Key
|
||||
```rust
|
||||
data Key {
|
||||
-- base58-encoded sha256(concat(label, owner_peer_id))
|
||||
id: string,
|
||||
-- any unique string defined by the owner
|
||||
label: string,
|
||||
-- peer id in base58
|
||||
owner_peer_id: string,
|
||||
-- timestamp of creation in seconds
|
||||
timestamp_created: u64,
|
||||
-- challenge in bytes, will be used for permissions
|
||||
challenge: []u8,
|
||||
-- challenge type, will be used for permissions
|
||||
challenge_type: string,
|
||||
-- encoded and hashed previous fields signed by `owner_peer_id`
|
||||
signature: []u8,
|
||||
}
|
||||
```
|
||||
|
||||
This data structure can be created via [`get_key_bytes`](#get_key_bytes) and [`register_key`](#register_key), and replicated via [`republish_key`](#republish_key). For now, there is no way to remove this structure; it can only be automatically garbage-collected via [`clear_expired`](#clear_expired). In future updates, key tombstones will be implemented, making it possible for an owner to remove a key.
|
||||
|
||||
In terms of Resources API Keys are Resources.
|
||||
### RecordMetadata
|
||||
```rust
|
||||
data RecordMetadata {
|
||||
-- base58-encoded key id
|
||||
key_id: string,
|
||||
-- peer id of the issuer in base58
|
||||
issued_by: string,
|
||||
-- peer_id of hoster
|
||||
peer_id: string,
|
||||
-- timestamp in seconds
|
||||
timestamp_issued: u64,
|
||||
-- will be used for permissions
|
||||
solution: []u8,
|
||||
-- any user-defined string
|
||||
value: string,
|
||||
-- optional (length is 0 or 1), base58 relay id
|
||||
relay_id: []string,
|
||||
-- optional (length is 0 or 1), advertising service id
|
||||
service_id: []string,
|
||||
-- encoded and hashed previous fields signed by `issued_by`
|
||||
issuer_signature: []u8,
|
||||
}
|
||||
```
|
||||
|
||||
Metadata is the main part of the Record created by issuer that contains routing information, such as optional relay id, peer id and optional service id. Key identifier is a deterministic hash of the `label` and the `owner_peer_id`.
|
||||
|
||||
### Record
|
||||
```rust
|
||||
data Record {
|
||||
-- record metadata
|
||||
metadata: RecordMetadata,
|
||||
-- timestamp in seconds
|
||||
timestamp_created: u64,
|
||||
-- encoded and hashed previous fields signed by `metadata.peer_id`
|
||||
signature: []u8,
|
||||
}
|
||||
```
|
||||
|
||||
Record is maintained by `metadata.peer_id` via renewing of `timestamp_created` field automatically with scheduled scripts for full-featured peers and manually for other peers (Note: here and below we mean Rust peers as full-featured and JS/TS as others). Record can be removed by issuing a tombstone or become expired and then garbage-collected. Record owner is `metadata.issued_by`.
|
||||
|
||||
### Tombstone
|
||||
```rust
|
||||
data Tombstone {
|
||||
-- base58-encoded key id
|
||||
key_id: string,
|
||||
-- peer id of the issuer in base58
|
||||
issued_by: string,
|
||||
-- peer_id of hoster
|
||||
peer_id: string,
|
||||
-- timestamp in seconds
|
||||
timestamp_issued: u64,
|
||||
-- will be used for permissions
|
||||
solution: []u8,
|
||||
-- encoded and hashed previous fields signed by `issued_by`
|
||||
issuer_signature: []u8,
|
||||
}
|
||||
```
|
||||
|
||||
Tombstone is a special type of record that can be issued by a record owner and that eventually substitutes any record with a lower `timestamp_issued`. Tombstones are replicated alongside keys and records and live long enough to ensure that the corresponding records are deleted. Tombstones are garbage-collected automatically.
|
||||
|
||||
In Resources API [`unregisterService`](#unregisterservice) method creates Tombstone.
|
||||
|
||||
## Resources API
|
||||
### Overview
|
||||
Resources API is a high-level API for Registry network protocol. It uses Kademlia for the discovery of resources and service records. Resource and corresponding service Records are identified by Resource ID, and can be found in Registry services on peers in the Kademlia neighborhood of this Resource ID.
|
||||
|
||||
### Return types
|
||||
```
|
||||
alias ResourceId: string
|
||||
alias Resource: Key
|
||||
alias Error: string
|
||||
```
|
||||
|
||||
Notes:
|
||||
- ResourceId is also an alias for key id in Resources API terminology.
|
||||
- Every method (except getResourceId) returns a stream of errors from different peers but success of execution is defined by first part of returned values. Optional types should be checked against `nil` to determine success of execution.
|
||||
|
||||
### Methods
|
||||
#### createResource
|
||||
```rust
|
||||
func createResource(label: string) -> ?ResourceId, *Error:
|
||||
```
|
||||
|
||||
Creates Resource with `label` and `INIT_PEER_ID` as owner.
|
||||
#### getResource
|
||||
```rust
|
||||
func getResource(resource_id: ResourceId) -> ?Resource, *Error:
|
||||
```
|
||||
Returns resource by corresponding `resource_id`.
|
||||
|
||||
#### getResourceId
|
||||
```rust
|
||||
func getResourceId(label: string, peer_id: string) -> ResourceId:
|
||||
```
|
||||
|
||||
Returns a deterministic hash of the `label` and the `peer_id`.
|
||||
#### registerService
|
||||
```rust
|
||||
func registerService(
|
||||
resource_id: ResourceId,
|
||||
value: string,
|
||||
peer_id: PeerId,
|
||||
service_id: ?string
|
||||
) -> bool, *Error:
|
||||
```
|
||||
|
||||
Registers Record issued by `INIT_PEER_ID` for service on `peer_id`. `value` is any user-defined string.
|
||||
#### unregisterService
|
||||
```rust
|
||||
func unregisterService(resource_id: ResourceId, peer_id: PeerId) -> bool, *Error:
|
||||
```
|
||||
|
||||
Prevents the record issued by `INIT_PEER_ID` from being renewed and eventually removed.
|
||||
#### resolveResource
|
||||
```rust
|
||||
func resolveResource(resource_id: ResourceId, ack: i16) -> ?[]Record, *Error:
|
||||
```
|
||||
|
||||
Returns all records registered by this `resource_id`. `ack` is a minimal number of polled peers.
|
||||
|
||||
#### executeOnResource
|
||||
```rust
|
||||
func executeOnResource(resource_id: ResourceId, ack: i16, call: Record -> ()) -> bool, *Error:
|
||||
```
|
||||
|
||||
Resolves all records by the given `resource_id` and executes the given callback on them in parallel.
|
172
CHANGELOG.md
Normal file
172
CHANGELOG.md
Normal file
@ -0,0 +1,172 @@
|
||||
# Changelog
|
||||
|
||||
## [0.9.4](https://github.com/fluencelabs/registry/compare/registry-v0.9.3...registry-v0.9.4) (2024-01-04)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **registry:** Use `aqua` keyword instead of `module` ([#313](https://github.com/fluencelabs/registry/issues/313)) ([b9bce2e](https://github.com/fluencelabs/registry/commit/b9bce2e7641d0431d8199d6a104f8c3d2fe3eee5))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **deps:** update dependency @fluencelabs/aqua-lib to v0.9.0 ([#328](https://github.com/fluencelabs/registry/issues/328)) ([5c9af8b](https://github.com/fluencelabs/registry/commit/5c9af8bd3a36493802b8e913d917e2fbd1621977))
|
||||
* **deps:** update marine things ([#307](https://github.com/fluencelabs/registry/issues/307)) ([f78212d](https://github.com/fluencelabs/registry/commit/f78212d49bca9fe30def6702ec65aa187fe9deb1))
|
||||
* **deps:** update rust crate fluence-keypair to v0.10.4 ([#318](https://github.com/fluencelabs/registry/issues/318)) ([b71b85c](https://github.com/fluencelabs/registry/commit/b71b85ca1eb0472176b78c237e421ec04418e0d9))
|
||||
* **deps:** update sqlite wasm to 0.18.2 ([#320](https://github.com/fluencelabs/registry/issues/320)) ([7d9327b](https://github.com/fluencelabs/registry/commit/7d9327bcfd11c2dd63b360c96fed045f3f0952c3))
|
||||
* **registry:** Revert release registry 0.9.4 ([#331](https://github.com/fluencelabs/registry/issues/331)) ([e9ba1ad](https://github.com/fluencelabs/registry/commit/e9ba1ad248418e3811fa8d7653545028b7e48127))
|
||||
|
||||
## [0.9.3](https://github.com/fluencelabs/registry/compare/registry-v0.9.2...registry-v0.9.3) (2023-12-21)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* use new aqua packages ([#310](https://github.com/fluencelabs/registry/issues/310)) ([633d8e6](https://github.com/fluencelabs/registry/commit/633d8e6648f344487da68b610857ee9837d0c081))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **deps:** update dependency @fluencelabs/trust-graph to v3.1.2 ([#189](https://github.com/fluencelabs/registry/issues/189)) ([7ba20dc](https://github.com/fluencelabs/registry/commit/7ba20dcabd9747256609e9b986bd63f47c94e691))
|
||||
* **spell:** update spell api ([#315](https://github.com/fluencelabs/registry/issues/315)) ([3092907](https://github.com/fluencelabs/registry/commit/3092907e5e5d38caeeda15a83ea11e0462022f41))
|
||||
|
||||
## [0.9.2](https://github.com/fluencelabs/registry/compare/registry-v0.9.1...registry-v0.9.2) (2023-12-19)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* update marine sdk's, sqlite conector and config ([#309](https://github.com/fluencelabs/registry/issues/309)) ([863ae55](https://github.com/fluencelabs/registry/commit/863ae55f35bbe5452b636c064f9f8b377bb10ee8))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **ci:** setup fcli in release step ([#305](https://github.com/fluencelabs/registry/issues/305)) ([7b89267](https://github.com/fluencelabs/registry/commit/7b892678b1003bcf0c0fc834b7b49ceb2172e388))
|
||||
* **deps:** update dependency @fluencelabs/aqua-lib to v0.8.1 ([#249](https://github.com/fluencelabs/registry/issues/249)) ([66a42f7](https://github.com/fluencelabs/registry/commit/66a42f7b935e82af9133e2d5bc2c864cb4296e2f))
|
||||
* **deps:** update dependency @fluencelabs/aqua-lib to v0.8.2 ([#308](https://github.com/fluencelabs/registry/issues/308)) ([c207f7f](https://github.com/fluencelabs/registry/commit/c207f7fa549702c45dd8f25d0f97d95944472e6e))
|
||||
* **deps:** update dependency @fluencelabs/trust-graph to v0.4.7 ([#257](https://github.com/fluencelabs/registry/issues/257)) ([a6aeeea](https://github.com/fluencelabs/registry/commit/a6aeeea3f5eb4f06a99ec272e0f5d3b4b0a2a8a7))
|
||||
|
||||
## [0.9.1](https://github.com/fluencelabs/registry/compare/registry-v0.9.0...registry-v0.9.1) (2023-12-06)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* use non-npm Fluence CLI ([#302](https://github.com/fluencelabs/registry/issues/302)) ([d77fd12](https://github.com/fluencelabs/registry/commit/d77fd12b4dfe2d57ae3e35f729e35e2f6ad1c63c))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **deps:** update dependency @fluencelabs/cli to v0.13.0 ([#290](https://github.com/fluencelabs/registry/issues/290)) ([2a440a8](https://github.com/fluencelabs/registry/commit/2a440a8b1ff8aa922bd2faa982b8b75c9beb3bc7))
|
||||
* **deps:** update rust crate marine-rs-sdk-test to v0.11.1 ([#292](https://github.com/fluencelabs/registry/issues/292)) ([2405f41](https://github.com/fluencelabs/registry/commit/2405f41702543d1ff70620923787a6a7621cc7d5))
|
||||
* remove binary import ([#304](https://github.com/fluencelabs/registry/issues/304)) ([c160475](https://github.com/fluencelabs/registry/commit/c16047515751f1400cb1f7231abcc83e2f6bcf4f))
|
||||
|
||||
## [0.9.0](https://github.com/fluencelabs/registry/compare/registry-v0.8.8...registry-v0.9.0) (2023-11-22)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* **subnetwork:** deprecate registry-based subnets [NET-633] ([#283](https://github.com/fluencelabs/registry/issues/283))
|
||||
|
||||
### Features
|
||||
|
||||
* **subnetwork:** deprecate registry-based subnets [NET-633] ([#283](https://github.com/fluencelabs/registry/issues/283)) ([81f15d4](https://github.com/fluencelabs/registry/commit/81f15d4eb74b730fca331f1ea4ef6b960a02f9c8))
|
||||
|
||||
## [0.8.8](https://github.com/fluencelabs/registry/compare/registry-v0.8.7...registry-v0.8.8) (2023-11-07)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* prepare cli update ([#270](https://github.com/fluencelabs/registry/issues/270)) ([2c29fea](https://github.com/fluencelabs/registry/commit/2c29fea09808e2f98c4f58a10a1587aa5a571ad0))
|
||||
* **registry:** Use streams instead of options [LNG-277] ([#282](https://github.com/fluencelabs/registry/issues/282)) ([19f5d47](https://github.com/fluencelabs/registry/commit/19f5d47add949f62085a022a01b84c83d3fc0389))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **ci:** use unstable nox image ([#255](https://github.com/fluencelabs/registry/issues/255)) ([257516e](https://github.com/fluencelabs/registry/commit/257516e74ff78807f78a7570ccc9e2d685af48f9))
|
||||
* **deps:** unlock and update rust crate serde to 1.0.188 ([#273](https://github.com/fluencelabs/registry/issues/273)) ([4cb1b90](https://github.com/fluencelabs/registry/commit/4cb1b90a95bdc49b87b1dd1336e604cc71444de3))
|
||||
* **deps:** Update cli to 0.11.0 ([#272](https://github.com/fluencelabs/registry/issues/272)) ([0ac1b76](https://github.com/fluencelabs/registry/commit/0ac1b76fe1c0635bfa5cf1105ffaf899db36b300))
|
||||
* **deps:** update dependency @fluencelabs/cli ([#276](https://github.com/fluencelabs/registry/issues/276)) ([2259425](https://github.com/fluencelabs/registry/commit/22594259767fbd5be59904eab080d74733e7ea3e))
|
||||
* **deps:** update dependency @fluencelabs/cli to v0.6.0 ([#238](https://github.com/fluencelabs/registry/issues/238)) ([be441e8](https://github.com/fluencelabs/registry/commit/be441e86cbc07a51636edfd07ec0fc80933b31cf))
|
||||
* **deps:** update dependency @fluencelabs/fluence-network-environment to v1.1.2 ([#277](https://github.com/fluencelabs/registry/issues/277)) ([8ff086a](https://github.com/fluencelabs/registry/commit/8ff086a206d37edaeebe986661b626277e456d95))
|
||||
* **deps:** update marine things ([#278](https://github.com/fluencelabs/registry/issues/278)) ([1f44cdc](https://github.com/fluencelabs/registry/commit/1f44cdc3b1188ef9daaba33a73ee85980c0c8bc6))
|
||||
* **deps:** update rust crate marine-rs-sdk to v0.9.0 ([#265](https://github.com/fluencelabs/registry/issues/265)) ([9b4142d](https://github.com/fluencelabs/registry/commit/9b4142dc951414270f5a76b0519aa749c8835eb6))
|
||||
|
||||
## [0.8.7](https://github.com/fluencelabs/registry/compare/registry-v0.8.6...registry-v0.8.7) (2023-06-20)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add distro crate [fixes NET-462] ([#233](https://github.com/fluencelabs/registry/issues/233)) ([5acf1d2](https://github.com/fluencelabs/registry/commit/5acf1d230b92f6b0784314b0926b6f6c2e195307))
|
||||
* Migrate Registry to spell ([#247](https://github.com/fluencelabs/registry/issues/247)) ([990b588](https://github.com/fluencelabs/registry/commit/990b588b75857d2f61b76d89999a2c1f09f861f8))
|
||||
* update to node 18 ([a08ee16](https://github.com/fluencelabs/registry/commit/a08ee16ff9dc402e1388e22c57324ca975c1a94d))
|
||||
|
||||
## [0.8.6](https://github.com/fluencelabs/registry/compare/registry-v0.8.5...registry-v0.8.6) (2023-05-19)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **parser:** Fix indentation ([#241](https://github.com/fluencelabs/registry/issues/241)) ([d96f5a4](https://github.com/fluencelabs/registry/commit/d96f5a4a0da7288ef6895c270fe207ea9a9f102d))
|
||||
|
||||
## [0.8.5](https://github.com/fluencelabs/registry/compare/registry-v0.8.4...registry-v0.8.5) (2023-05-08)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **builtin-package:** use new blueprint ([#234](https://github.com/fluencelabs/registry/issues/234)) ([061cf2f](https://github.com/fluencelabs/registry/commit/061cf2f8186192c39946628e21e466323dc31a33))
|
||||
|
||||
## [0.8.4](https://github.com/fluencelabs/registry/compare/registry-v0.8.3...registry-v0.8.4) (2023-04-19)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* update aqua-lib and trust-graph versions ([#229](https://github.com/fluencelabs/registry/issues/229)) ([5e460e3](https://github.com/fluencelabs/registry/commit/5e460e3e2429df909d034193fedf2876f86b18a8))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **deps:** pin dependencies ([#198](https://github.com/fluencelabs/registry/issues/198)) ([e66457c](https://github.com/fluencelabs/registry/commit/e66457c0ff696330717e58e3ebb4120709281202))
|
||||
* **deps:** update dependency @fluencelabs/fluence-network-environment to v1.0.14 ([#195](https://github.com/fluencelabs/registry/issues/195)) ([204af45](https://github.com/fluencelabs/registry/commit/204af450001cd6e1ed587111fcc452d41d56a705))
|
||||
|
||||
## [0.8.3](https://github.com/fluencelabs/registry/compare/registry-v0.8.2...registry-v0.8.3) (2023-04-06)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **sqlite:** bump to v0.18.1 ([#218](https://github.com/fluencelabs/registry/issues/218)) ([4fd0895](https://github.com/fluencelabs/registry/commit/4fd0895ab8415b60eacb34e0a627e9d6d5b5fe2c))
|
||||
|
||||
## [0.8.2](https://github.com/fluencelabs/registry/compare/registry-v0.8.1...registry-v0.8.2) (2023-03-08)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **deps:** Update sqlite to 0.8.0 ([#205](https://github.com/fluencelabs/registry/issues/205)) ([d27f232](https://github.com/fluencelabs/registry/commit/d27f232fb44629b18fa45e45b7c33e332f5817fd))
|
||||
|
||||
## [0.8.1](https://github.com/fluencelabs/registry/compare/registry-v0.8.0...registry-v0.8.1) (2023-02-24)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **subnet:** add on HOST_PEER_ID in resolveSubnetwork ([#202](https://github.com/fluencelabs/registry/issues/202)) ([3960180](https://github.com/fluencelabs/registry/commit/3960180246471a78bacf5fa65152a52fb3d4ddf2))
|
||||
|
||||
## [0.8.0](https://github.com/fluencelabs/registry/compare/registry-v0.7.1...registry-v0.8.0) (2023-02-24)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* **storage:** bump SQLite module to 0.18.0 ([#200](https://github.com/fluencelabs/registry/issues/200))
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **storage:** bump SQLite module to 0.18.0 ([#200](https://github.com/fluencelabs/registry/issues/200)) ([f671c8a](https://github.com/fluencelabs/registry/commit/f671c8ac1514a11331ae871a7e126f1e908214f6))
|
||||
|
||||
## [0.7.1](https://github.com/fluencelabs/registry/compare/registry-v0.7.0...registry-v0.7.1) (2023-02-20)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **deals:** register and resolve workers ([#197](https://github.com/fluencelabs/registry/issues/197)) ([8d49211](https://github.com/fluencelabs/registry/commit/8d492113f17ec7add582f7f2d9575fc48b5325dc))
|
||||
* **tests:** Run tests using fluence cli [fixes DXJ-225] ([#165](https://github.com/fluencelabs/registry/issues/165)) ([269373f](https://github.com/fluencelabs/registry/commit/269373f0ea904c572cffa51b8d49a248822c7ff1))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* run all tests with different secret keys [fixes DXJ-242] ([#187](https://github.com/fluencelabs/registry/issues/187)) ([9b5cfbd](https://github.com/fluencelabs/registry/commit/9b5cfbd987259a890933e516e8ec2fee58e149d8))
|
||||
* **tests:** fix registry aqua tests [fixes DXJ-235] ([#178](https://github.com/fluencelabs/registry/issues/178)) ([9981043](https://github.com/fluencelabs/registry/commit/9981043448fa3a9d64353ab763f9985245a6dff0))
|
13
CONTRIBUTING.md
Normal file
13
CONTRIBUTING.md
Normal file
@ -0,0 +1,13 @@
|
||||
## Contribute Code
|
||||
|
||||
You are welcome to contribute to Fluence!
|
||||
|
||||
Things you need to know:
|
||||
|
||||
1. You need to **agree to the [Contributor License Agreement](https://gist.github.com/fluencelabs-org/3f4cbb3cc14c1c0fb9ad99d8f7316ed7) (CLA)**. This is a common practice in all major Open Source projects. At the current moment, we are unable to accept contributions made on behalf of a company. Only individual contributions will be accepted.
|
||||
|
||||
2. **Not all proposed contributions can be accepted**. Some features may, e.g., just fit a third-party add-on better. The contribution must fit the overall direction of Fluence and really improve it. The more effort you invest, the better you should clarify in advance whether the contribution fits: the best way would be to just open an issue to discuss the contribution you plan to make.
|
||||
|
||||
### Contributor License Agreement
|
||||
|
||||
When you contribute, you have to be aware that your contribution is covered by **[Apache License 2.0](./LICENSE)**, but might relicensed under few other software licenses mentioned in the **Contributor License Agreement**. In particular, you need to agree to the Contributor License Agreement. If you agree to its content, you simply have to click on the link posted by the CLA assistant as a comment to the pull request. Click it to check the CLA, then accept it on the following screen if you agree to it. The CLA assistant will save this decision for upcoming contributions and will notify you if there is any change to the CLA in the meantime.
|
141
INSTALL.md
Normal file
141
INSTALL.md
Normal file
@ -0,0 +1,141 @@
|
||||
# How to Use Registry in Aqua
|
||||
|
||||
## How to install
|
||||
If you use Fluence CLI you can define dependency in `fluence.yaml`:
|
||||
```yaml
|
||||
dependencies:
|
||||
npm:
|
||||
"@fluencelabs/registry": 0.6.2
|
||||
```
|
||||
And then run `fluence dep i`
|
||||
|
||||
If you are developing from scratch without Fluence CLI, you should install it via npm:
|
||||
|
||||
```bash
|
||||
npm i @fluencelabs/registry@nightly
|
||||
```
|
||||
|
||||
## How to import
|
||||
|
||||
```rust
|
||||
import "@fluencelabs/registry/resources-api.aqua"
|
||||
import "@fluencelabs/registry/registry-service.aqua"
|
||||
|
||||
func myFunction(resource_id: string) -> ?[]Record, *Error:
|
||||
result, errors <- resolveResource(resource_id, 2)
|
||||
<- result, errors
|
||||
```
|
||||
|
||||
## How to create a Resource
|
||||
- `createResource(label: string) -> ?ResourceId, *Error`
|
||||
|
||||
Let's register a resource with the label `sample` by `INIT_PEER_ID`:
|
||||
```rust
|
||||
func my_resource() -> ?ResourceId, *Error:
|
||||
id, errors <- createResource("sample")
|
||||
<- id, errors
|
||||
```
|
||||
|
||||
- `label` is a unique string for the peer id
|
||||
- creation is successful if a resource id is returned
|
||||
- `*Error` accumulates errors from all the affected peers
|
||||
|
||||
## How to remove a Resource
|
||||
|
||||
For now there is no method for Resource removal but it can be expired and garbage-collected if it doesn't have any actual records. In the future updates it can be changed.
|
||||
|
||||
## How to register a service
|
||||
```
|
||||
registerService(resource_id: ResourceId, value: string, peer_id: string service_id: ?string) -> bool, *Error
|
||||
```
|
||||
|
||||
Let's register a local service `greeting` and pass a random string `hi` as a value:
|
||||
```rust
|
||||
func registerLocalService(resource_id: ResourceId) -> bool, *Error:
|
||||
success, errors <- registerService(resource_id, "hi", INIT_PEER_ID, ?[greeting])
|
||||
<- success, errors
|
||||
```
|
||||
|
||||
|
||||
Let's register a service `echo` hosted on `peer_id` and pass a random string like `sample` as a value:
|
||||
```rust
|
||||
func registerExternalService(resource_id: ResourceId, peer_id: PeerId) -> bool, *Error:
|
||||
success, errors <- registerService(resource_id, "hi", peer_id, ?[greeting])
|
||||
<- success, errors
|
||||
```
|
||||
|
||||
- `value` is a user-defined string that can be used at the discretion of the user
|
||||
- to update the service record, you should register it again to create a record with a newer timestamp
|
||||
- service record will be automatically updated till deleted via `unregisterService`
|
||||
|
||||
|
||||
## How to unregister a service
|
||||
```
|
||||
func unregisterService(resource_id: ResourceId, peer_id: PeerId) -> bool, *Error:
|
||||
```
|
||||
Let's remove a service record from a target node:
|
||||
```rust
|
||||
func stopProvideExternalService(resource_id: ResourceId, peer_id: PeerId):
|
||||
unregisterService(resource_id, peer_id)
|
||||
```
|
||||
|
||||
- it will be removed from the target node and eventually from the network
|
||||
|
||||
## How to resolve service records
|
||||
- `resolveResource(resource_id: ResourceId, ack: i16) -> ?[]Record, *Error`
|
||||
|
||||
Let's resolve all service records for our resource_id:
|
||||
```rust
|
||||
func getMyRecords(resource_id: ResourceId, consistency_level: i16) -> ?[]Record, *Error:
|
||||
records, error <- resolveResource(resource_id, consistency_level)
|
||||
<- records, error
|
||||
```
|
||||
|
||||
- `ack` represents the minimal number of peers that are queried for known records
|
||||
|
||||
## How to execute a callback on Resource
|
||||
- `executeOnResource(resource_id: ResourceId, ack: i16, call: Record -> ()) -> bool, *Error`
|
||||
|
||||
```rust
|
||||
func callProvider(r: Record):
|
||||
-- topological move to a provider via relay
|
||||
on r.metadata.peer_id via r.metadata.relay_id:
|
||||
-- resolve and call your service on a provider
|
||||
MyService r.metadata.service_id!
|
||||
MyService.do_smth()
|
||||
|
||||
-- call on every provider
|
||||
func callEveryone(resource_id: ResourceId, ack: i16) -> bool, *Error:
|
||||
success, errors <- executeOnResource(resource_id, ack, callProvider)
|
||||
```
|
||||
|
||||
- it is a combination of `resolveResource` and a `for` loop through records with the callback execution
|
||||
- it can be useful in case of broadcasting events
|
||||
|
||||
## Replication
|
||||
|
||||
Resources with corresponding records and tombstones are automatically and periodically replicated to the Kademlia neighborhood of `resource_id`.
|
||||
|
||||
## Remarks
|
||||
|
||||
You can redefine [`INITIAL_REPLICATION_FACTOR`](https://github.com/fluencelabs/registry/blob/main/aqua/resources-api.aqua#L10) and [`CONSISTENCY_LEVEL`](https://github.com/fluencelabs/registry/blob/main/aqua/resources-api.aqua#L11). The first constant is used to define the number of peers to which data will be replicated during the API call. This constant doesn't affect the network-wide replication factor, which is defined by Kademlia. The second constant defines the minimal number of peers requested to obtain the data.
|
||||
|
||||
## Use cases
|
||||
|
||||
### Services discovery
|
||||
Discover services without prior knowledge about exact peers and service identifiers.
|
||||
|
||||
### Service high-availability
|
||||
A service provided by several peers still will be available for the client in case of disconnections and other providers' failures.
|
||||
|
||||

|
||||
|
||||
### Subnetwork discovery
|
||||
You can register a group of peers for a resource (without specifying any services). So you "tag" and group the nodes to create a subnetwork.
|
||||
|
||||

|
||||
|
||||
### Load balancer
|
||||
If you have a list of service records updated in runtime, you can create a load-balancing service based on your preferred metrics.
|
||||
|
||||
|
214
LICENSE
214
LICENSE
@ -1,21 +1,201 @@
|
||||
MIT License
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
Copyright (c) 2021 Fluence Project
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
1. Definitions.
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
61
README.md
61
README.md
@ -1,22 +1,59 @@
|
||||
# Registry
|
||||
|
||||
[Distributed Hash Table](https://en.wikipedia.org/wiki/Distributed_hash_table) (DHT) implementation for the Fluence network with an Aqua interface.
|
||||
[](https://www.npmjs.com/package/@fluencelabs/registry)
|
||||
|
||||
|
||||
## Overview
|
||||
|
||||
Registry is an essential part of the [Fluence network](https://fluence.network) protocol. It provides a Resources API that can be used for service advertisement and discovery. Registry is available (built-in) on every Fluence node, and it provides service advertisement and discovery. The component allows creating relationships between unique identifiers and groups of services on various peers, so that service providers can either join or disconnect anytime and be discoverable on the network.
|
||||
|
||||
There are many [services](https://doc.fluence.dev/docs/concepts#services) in the network on different peers, and there should be a way to find and resolve these services without prior knowledge about exact identifiers. Such an approach brings robustness and flexibility to our solutions in terms of discovery, redundancy and high availability.
|
||||
|
||||
In centralized systems, one can have centralized storage and routing, but in p2p decentralized environments, the problem becomes more challenging. Our solution for the problem is **Registry**, a purpose-driven distributed hash table (DHT), an inherent part of the [Fluence](https://fluence.dev) protocol.
|
||||
|
||||

|
||||
|
||||
However, Registry is not a plain key/value storage. Instead, it is a composition of the Registry service for each network participant and scheduled scripts maintaining replication and garbage collection. Thus, if you want to discover a group of services on different peers without prior knowledge, you should create a **Resource**. A resource is a group of services or peers united by some common feature. Any service is represented by a combination of `service_id` and `peer_id`, it is called a **Record**.
|
||||
|
||||
**Why is Registry important?**
|
||||
|
||||
Scalability, redundancy and high availability are essential parts of a decentralized system, but they are not available out of the box. To enable them, information about services should be bound with peers providing them. Also, such networks are constantly changing, and those changes should be reflected and resolvable to provide uninterruptible access. So there's a need to have a decentralized protocol to update and resolve information about routing, both global and local.
|
||||
|
||||
|
||||
## Installation and Usage
|
||||
|
||||
A complete workflow covering installation of Registry, creating Resources, registering services etc. can be found [here](INSTALL.md).
|
||||
|
||||
|
||||
## Documentation
|
||||
See [Aqua Book](https://fluence.dev/aqua-book/libraries/aqua-dht).
|
||||
|
||||
## How to Use
|
||||
Comprehensive documentation on Fluence can be found [here](https://fluence.dev). In particular, it includes [Aqua Book](https://fluence.dev/docs/aqua-book/getting-started/). Also, check our [YouTube channel](https://www.youtube.com/@fluencelabs). [This presentation](https://www.youtube.com/watch?v=Md0_Ny_5_1o&t=770s) at one of our community calls was especially dedicated to Registry.
|
||||
|
||||
See [example](./example):
|
||||
- How to call [`registry`](./example/src/example.ts) function in TS/JS
|
||||
- Writing an Aqua script using `registry`: [event_example.aqua](./example/src/aqua/event_example.aqua)
|
||||
Resources API is defined in the [resources-api](./aqua/resources-api.aqua) module. Service API is defined in the [registry-service](./aqua/registry-service.aqua) module. For the details, check the [API Reference](./API_reference.md).
|
||||
|
||||
## API
|
||||
|
||||
API is defined in the [routing.aqua](./aqua/routing.aqua) module.
|
||||
## Repository Structure
|
||||
|
||||
## Learn Aqua
|
||||
- [**aqua-tests**](./aqua-tests) contains tests for the Aqua API and use
|
||||
it in e2e tests
|
||||
- [**aqua**](./aqua) is the Registry Aqua API
|
||||
- [**builtin-package**](./builtin-package) contains a build script and
|
||||
config files used for building a standard distro for the Rust peer
|
||||
builtins
|
||||
- [**service**](./service) is the Rust code for the Registry service
|
||||
|
||||
|
||||
## Support
|
||||
|
||||
Please, file an [issue](https://github.com/fluencelabs/registry/issues) if you find a bug. You can also contact us at [Discord](https://discord.com/invite/5qSnPZKh7u) or [Telegram](https://t.me/fluence_project). We will do our best to resolve the issue ASAP.
|
||||
|
||||
|
||||
## Contributing
|
||||
|
||||
Any interested person is welcome to contribute to the project. Please, make sure you read and follow some basic [rules](./CONTRIBUTING.md).
|
||||
|
||||
|
||||
## License
|
||||
|
||||
All software code is copyright (c) Fluence Labs, Inc. under the [Apache-2.0](./LICENSE) license.
|
||||
|
||||
* [Aqua Book](https://fluence.dev/aqua-book/)
|
||||
* [Aqua Playground](https://github.com/fluencelabs/aqua-playground)
|
||||
* [Aqua repo](https://github.com/fluencelabs/aqua)
|
||||
|
67
aqua-tests/.fluence/aqua-dependencies/package-lock.json
generated
Normal file
67
aqua-tests/.fluence/aqua-dependencies/package-lock.json
generated
Normal file
@ -0,0 +1,67 @@
|
||||
{
|
||||
"name": "aqua-dependencies",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"dependencies": {
|
||||
"@fluencelabs/aqua-lib": "0.8.1",
|
||||
"@fluencelabs/registry": "0.8.7",
|
||||
"@fluencelabs/spell": "0.5.33",
|
||||
"@fluencelabs/trust-graph": "3.1.2"
|
||||
}
|
||||
},
|
||||
"node_modules/@fluencelabs/aqua-lib": {
|
||||
"version": "0.8.1",
|
||||
"resolved": "https://registry.npmjs.org/@fluencelabs/aqua-lib/-/aqua-lib-0.8.1.tgz",
|
||||
"integrity": "sha512-VLslkhi3hsNLWkgsoCyceCediqkicWphMVHZ+9eEkgMumepvo7TcqiYC14bl2LpZjn7YZ6y/OzK+Ffy8ADfKdA=="
|
||||
},
|
||||
"node_modules/@fluencelabs/registry": {
|
||||
"version": "0.8.7",
|
||||
"resolved": "https://registry.npmjs.org/@fluencelabs/registry/-/registry-0.8.7.tgz",
|
||||
"integrity": "sha512-43bmb1v4p5ORvaiLBrUAl+hRPo3luxxBVrJgqTvipJa2OEg2wCRA/Wo9s4M7Lchnv3NoYLOyNTzNyFopQRKILA==",
|
||||
"dependencies": {
|
||||
"@fluencelabs/aqua-lib": "0.7.0",
|
||||
"@fluencelabs/trust-graph": "0.4.1"
|
||||
}
|
||||
},
|
||||
"node_modules/@fluencelabs/registry/node_modules/@fluencelabs/aqua-lib": {
|
||||
"version": "0.7.0",
|
||||
"resolved": "https://registry.npmjs.org/@fluencelabs/aqua-lib/-/aqua-lib-0.7.0.tgz",
|
||||
"integrity": "sha512-mJEaxfAQb6ogVM4l4qw7INK6kvLA2Y161ErwL7IVeVSkKXIeYq/qio2p2au35LYvhBNsKc7XP2qc0uztCmxZzA=="
|
||||
},
|
||||
"node_modules/@fluencelabs/registry/node_modules/@fluencelabs/trust-graph": {
|
||||
"version": "0.4.1",
|
||||
"resolved": "https://registry.npmjs.org/@fluencelabs/trust-graph/-/trust-graph-0.4.1.tgz",
|
||||
"integrity": "sha512-V/6ts4q/Y0uKMS6orVpPyxfdd99YFMkm9wN9U2IFtlBUWNsQZG369FK9qEizwsSRCqTchMHYs8Vh4wgZ2uRfuQ==",
|
||||
"dependencies": {
|
||||
"@fluencelabs/aqua-lib": "^0.7.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@fluencelabs/spell": {
|
||||
"version": "0.5.33",
|
||||
"resolved": "https://registry.npmjs.org/@fluencelabs/spell/-/spell-0.5.33.tgz",
|
||||
"integrity": "sha512-JZ+CWTrBXwX6DilzxsJfg39DMsQN9P/h1jyujcDwIpOKynbGCD84g5t9hsplNVH/pEZwcYtGajDH293Sg54bwA==",
|
||||
"dependencies": {
|
||||
"@fluencelabs/aqua-lib": "0.8.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18",
|
||||
"pnpm": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/@fluencelabs/trust-graph": {
|
||||
"version": "3.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@fluencelabs/trust-graph/-/trust-graph-3.1.2.tgz",
|
||||
"integrity": "sha512-HpyHtiomh09wv6/83z+bhbkqVngIUdqNGEXRTIPg4sArVPMZ9UCXBrkQsHDRqdMUx0lBAcgB3IjlbdhkwHGaXA==",
|
||||
"dependencies": {
|
||||
"@fluencelabs/aqua-lib": "^0.5.2"
|
||||
}
|
||||
},
|
||||
"node_modules/@fluencelabs/trust-graph/node_modules/@fluencelabs/aqua-lib": {
|
||||
"version": "0.5.2",
|
||||
"resolved": "https://registry.npmjs.org/@fluencelabs/aqua-lib/-/aqua-lib-0.5.2.tgz",
|
||||
"integrity": "sha512-fmoFFE8myhLH9d+YR0+0ZPL2YIQyR6M1woAGu5d1xXI02Sjzn4id6dE4PpxHb8cSBPRie8AwsKobHCNqGxI8oA=="
|
||||
}
|
||||
}
|
||||
}
|
9
aqua-tests/.fluence/env.yaml
Normal file
9
aqua-tests/.fluence/env.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
# yaml-language-server: $schema=schemas/env.json
|
||||
|
||||
# Defines user project preferences
|
||||
|
||||
# Documentation: https://github.com/fluencelabs/cli/tree/main/docs/configs/env.md
|
||||
|
||||
version: 0
|
||||
|
||||
fluenceEnv: local
|
1
aqua-tests/.fluence/secrets/test_create_resource.txt
Normal file
1
aqua-tests/.fluence/secrets/test_create_resource.txt
Normal file
@ -0,0 +1 @@
|
||||
BNidntUryx+hxr7NK2z9nci23sMn3fURB6bTH1K2Ll4=
|
1
aqua-tests/.fluence/secrets/test_get_resource.txt
Normal file
1
aqua-tests/.fluence/secrets/test_get_resource.txt
Normal file
@ -0,0 +1 @@
|
||||
e72l3wuItcfCcQBP6Rn4L0uQRsKmyckZRbYXP1ms59Q=
|
@ -0,0 +1 @@
|
||||
rZxZGGCxECt1opnXjnxrSpV2g6Qt2Fl0KTDoJkox008=
|
@ -0,0 +1 @@
|
||||
I/ZUMsjlt47e9LxYxbk/LamZJUzNxoBikPA+Qqy8yYA=
|
2
aqua-tests/.gitignore
vendored
Normal file
2
aqua-tests/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
/.fluence/schemas
|
||||
/.fluence/aqua-dependencies/package.json
|
16
aqua-tests/README.md
Normal file
16
aqua-tests/README.md
Normal file
@ -0,0 +1,16 @@
|
||||
# Registry API tests
|
||||
|
||||
## How to run
|
||||
|
||||
- `npm i`
|
||||
- `pip3 install -r requirements.txt`
|
||||
- `pip install -U pytest`
|
||||
- `pytest -n auto`
|
||||
|
||||
## Adding new test
|
||||
|
||||
Before adding new test go to the aqua-tests dir first, then run `npm run secret`
|
||||
to add a new key-pair for the new test.
|
||||
Name it the same way the test function will be called (e.g. `test_create_resource`)
|
||||
This is required for tests to run in parallel. Key-pairs could've been generated on the fly
|
||||
but it's a bit faster to not waste time on it each time the tests are run
|
9
aqua-tests/aqua/test.aqua
Normal file
9
aqua-tests/aqua/test.aqua
Normal file
@ -0,0 +1,9 @@
|
||||
aqua Test
|
||||
|
||||
import "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
import "@fluencelabs/registry/resources-api.aqua"
|
||||
|
||||
export getResource, createResource, getResourceId, get_peer_id, registerService, resolveResource, unregisterService
|
||||
|
||||
func get_peer_id() -> PeerId:
|
||||
<- INIT_PEER_ID
|
10
aqua-tests/config.py
Normal file
10
aqua-tests/config.py
Normal file
@ -0,0 +1,10 @@
|
||||
|
||||
def get_local():
|
||||
return [
|
||||
'/ip4/127.0.0.1/tcp/9991/ws/p2p/12D3KooWBM3SdXWqGaawQDGQ6JprtwswEg3FWGvGhmgmMez1vRbR',
|
||||
'/ip4/127.0.0.1/tcp/9992/ws/p2p/12D3KooWQdpukY3p2DhDfUfDgphAqsGu5ZUrmQ4mcHSGrRag6gQK',
|
||||
'/ip4/127.0.0.1/tcp/9993/ws/p2p/12D3KooWRT8V5awYdEZm6aAV9HWweCEbhWd7df4wehqHZXAB7yMZ',
|
||||
'/ip4/127.0.0.1/tcp/9994/ws/p2p/12D3KooWBzLSu9RL7wLP6oUowzCbkCj2AGBSXkHSJKuq4wwTfwof',
|
||||
'/ip4/127.0.0.1/tcp/9995/ws/p2p/12D3KooWBf6hFgrnXwHkBnwPGMysP3b1NJe5HGtAWPYfwmQ2MBiU',
|
||||
'/ip4/127.0.0.1/tcp/9996/ws/p2p/12D3KooWPisGn7JhooWhggndz25WM7vQ2JmA121EV8jUDQ5xMovJ'
|
||||
]
|
18
aqua-tests/fluence.yaml
Normal file
18
aqua-tests/fluence.yaml
Normal file
@ -0,0 +1,18 @@
|
||||
# yaml-language-server: $schema=.fluence/schemas/fluence.json
|
||||
|
||||
# Defines Fluence Project, most importantly - what exactly you want to deploy and how. You can use `fluence init` command to generate a template for new Fluence project
|
||||
|
||||
# Documentation: https://github.com/fluencelabs/fluence-cli/tree/main/docs/configs/fluence.md
|
||||
|
||||
version: 5
|
||||
|
||||
aquaInputPath: aqua/test.aqua
|
||||
|
||||
dependencies:
|
||||
cargo:
|
||||
marine: 0.14.1
|
||||
mrepl: 0.21.3
|
||||
npm:
|
||||
'@fluencelabs/aqua-lib': 0.9.1
|
||||
'@fluencelabs/spell': 0.6.9
|
||||
'@fluencelabs/trust-graph': 0.4.11
|
13
aqua-tests/getDefaultPeers.js
Normal file
13
aqua-tests/getDefaultPeers.js
Normal file
@ -0,0 +1,13 @@
|
||||
const {
|
||||
krasnodar,
|
||||
stage,
|
||||
testNet,
|
||||
} = require('@fluencelabs/fluence-network-environment')
|
||||
|
||||
console.log(
|
||||
JSON.stringify({
|
||||
krasnodar,
|
||||
stage,
|
||||
testnet: testNet,
|
||||
}),
|
||||
)
|
18
aqua-tests/package-lock.json
generated
Normal file
18
aqua-tests/package-lock.json
generated
Normal file
@ -0,0 +1,18 @@
|
||||
{
|
||||
"name": "aqua-tests",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"devDependencies": {
|
||||
"@fluencelabs/fluence-network-environment": "1.1.2"
|
||||
}
|
||||
},
|
||||
"node_modules/@fluencelabs/fluence-network-environment": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@fluencelabs/fluence-network-environment/-/fluence-network-environment-1.1.2.tgz",
|
||||
"integrity": "sha512-1Bp2gBy3oMEILMynFpOIFK/q2Pj792xpnb3AJs5QcTQAaHz9V2nrEI8OOPwBAFTmjmLBirXBqQQX63O+ePH7yg==",
|
||||
"dev": true
|
||||
}
|
||||
}
|
||||
}
|
5
aqua-tests/package.json
Normal file
5
aqua-tests/package.json
Normal file
@ -0,0 +1,5 @@
|
||||
{
|
||||
"devDependencies": {
|
||||
"@fluencelabs/fluence-network-environment": "1.1.2"
|
||||
}
|
||||
}
|
4
aqua-tests/requirements.txt
Normal file
4
aqua-tests/requirements.txt
Normal file
@ -0,0 +1,4 @@
|
||||
delegator.py==0.1.1
|
||||
pytest==7.3.0
|
||||
pytest-xdist==3.2.1
|
||||
pytest-repeat==0.9.1
|
74
aqua-tests/spell/spell.aqua
Normal file
74
aqua-tests/spell/spell.aqua
Normal file
@ -0,0 +1,74 @@
|
||||
aqua TestSpell
|
||||
|
||||
export spell
|
||||
|
||||
import Op, Debug, Peer, Kademlia from "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
import Spell from "@fluencelabs/spell/spell_service.aqua"
|
||||
import Compare from "@fluencelabs/aqua-lib/math.aqua"
|
||||
|
||||
import "@fluencelabs/registry/registry-service.aqua"
|
||||
import "@fluencelabs/registry/registry-api.aqua"
|
||||
import "@fluencelabs/trust-graph/trust-graph.aqua"
|
||||
|
||||
data SpellConfig:
|
||||
expired_interval: u32
|
||||
renew_interval: u32
|
||||
replicate_interval:u32
|
||||
|
||||
-- A hack to allow using timestamp as u32 values
|
||||
-- Aqua doesn't allow truncating values
|
||||
service PeerTimeTrunc("peer"):
|
||||
timestamp_sec() -> u32
|
||||
|
||||
func log_info(spell_id: string, msg: string):
|
||||
Spell spell_id
|
||||
Spell.list_push_string("logs", msg)
|
||||
|
||||
|
||||
-- clears expired records
|
||||
func clear_expired(now:u32):
|
||||
Registry.clear_expired(now)
|
||||
|
||||
-- update stale local records
|
||||
func renew(now:u32):
|
||||
res <- Registry.get_stale_local_records(now)
|
||||
for r <- res.result par:
|
||||
signature <- getRecordSignature(r.metadata, now)
|
||||
putRecord(r.metadata, now, signature.signature!)
|
||||
|
||||
-- get all old records and replicate it by routes
|
||||
func replicate(now:u32):
|
||||
res <- Registry.evict_stale(now)
|
||||
for r <- res.results par:
|
||||
k <- Op.string_to_b58(r.key.id)
|
||||
nodes <- Kademlia.neighborhood(k, nil, nil)
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
tt <- Peer.timestamp_sec()
|
||||
key_weight <- TrustGraph.get_weight(r.key.owner_peer_id, tt)
|
||||
Registry.republish_key(r.key, key_weight, tt)
|
||||
|
||||
records_weights: *WeightResult
|
||||
for record <- r.records:
|
||||
records_weights <- TrustGraph.get_weight(record.metadata.issued_by, tt)
|
||||
Registry.republish_records(r.records, records_weights, tt)
|
||||
|
||||
func spell(config: SpellConfig):
|
||||
Spell "registry-spell"
|
||||
log = (msg: string):
|
||||
log_info("registry-spell", msg)
|
||||
|
||||
check_and_run = (key: string, now:u32, interval: u32, job: u32 -> ()):
|
||||
last_run <- Spell.get_u32(key)
|
||||
need_to_run = !last_run.success || ((now - last_run.value) >= interval)
|
||||
if need_to_run == true:
|
||||
log(Op.concat_strings(Op.concat_strings("Running ", key), "job"))
|
||||
job(now)
|
||||
Spell.set_u32(key, now)
|
||||
|
||||
|
||||
on HOST_PEER_ID:
|
||||
now <- PeerTimeTrunc.timestamp_sec()
|
||||
check_and_run("clear_expired", now, config.expired_interval, clear_expired)
|
||||
check_and_run("renew", now, config.renew_interval, renew)
|
||||
check_and_run("replicate", now, config.replicate_interval, replicate)
|
11
aqua-tests/spell/spell.yaml
Normal file
11
aqua-tests/spell/spell.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
# yaml-language-server: $schema=../.fluence/schemas/spell.yaml.json
|
||||
|
||||
# Defines a spell. You can use `fluence spell new` command to generate a template for new spell
|
||||
|
||||
# Documentation: https://github.com/fluencelabs/fluence-cli/tree/main/docs/configs/spell.md
|
||||
|
||||
version: 0
|
||||
aquaFilePath: ./spell.aqua
|
||||
function: spell
|
||||
clock:
|
||||
periodSec: 6000
|
172
aqua-tests/test_aqua.py
Normal file
172
aqua-tests/test_aqua.py
Normal file
@ -0,0 +1,172 @@
|
||||
import delegator
|
||||
import random
|
||||
import json
|
||||
import os
|
||||
import inspect
|
||||
from config import get_local
|
||||
|
||||
default_peers = json.loads(delegator.run(
|
||||
f"node ./getDefaultPeers.js", block=True).out)
|
||||
|
||||
|
||||
def get_relays():
|
||||
env = os.environ.get("FLUENCE_ENV")
|
||||
if env == "local":
|
||||
peers = get_local()
|
||||
else:
|
||||
if env is None:
|
||||
env = "testnet"
|
||||
peers = [peer["multiaddr"] for peer in default_peers[env]]
|
||||
|
||||
assert len(peers) != 0
|
||||
return peers
|
||||
|
||||
|
||||
relays = get_relays()
|
||||
peer_ids = [relay.split("/")[-1] for relay in relays]
|
||||
|
||||
|
||||
def get_random_list_item(ar):
|
||||
return ar[random.randint(0, len(ar) - 1)]
|
||||
|
||||
|
||||
def get_random_relay():
|
||||
return get_random_list_item(relays)
|
||||
|
||||
|
||||
def get_random_peer_id():
|
||||
return get_random_list_item(peer_ids)
|
||||
|
||||
|
||||
def get_label():
|
||||
return ''.join(random.choice('0123456789ABCDEF') for i in range(16))
|
||||
|
||||
|
||||
def run_aqua(func, args, relay=get_random_relay()):
|
||||
|
||||
# "a" : arg1, "b" : arg2 .....
|
||||
data = {chr(97 + i): arg for (i, arg) in enumerate(args)}
|
||||
call = f"{func}(" + ", ".join([chr(97 + i)
|
||||
for i in range(0, len(args))]) + ")"
|
||||
# inspect.stack method inspects the current execution stack as the name suggests
|
||||
# it's possible to infer that the minus 39th element of the stack always contains info
|
||||
# about the test function that is currently running. The third element is the function's name
|
||||
try:
|
||||
test_name = inspect.stack()[-39][3]
|
||||
except:
|
||||
# when running one test at a time, the stack is shorter so we need to use a different index
|
||||
test_name = inspect.stack()[-32][3]
|
||||
|
||||
command = f"fluence run -k {test_name} --relay {relay} -f '{call}' --data '{json.dumps(data)}' --quiet --particle-id"
|
||||
print(command)
|
||||
c = delegator.run(command, block=True)
|
||||
lines = c.out.splitlines()
|
||||
particle_id = lines[0] if len(lines) != 0 else ""
|
||||
|
||||
if len(c.err.strip()) != 0:
|
||||
print(f"{particle_id}\n{c.err}")
|
||||
|
||||
result = '\n'.join(lines[1:])
|
||||
|
||||
try:
|
||||
result = json.loads(result)
|
||||
print(result)
|
||||
return result
|
||||
except:
|
||||
print(result)
|
||||
return result
|
||||
|
||||
|
||||
def create_resource(label):
|
||||
result, error = run_aqua("createResource", [label])
|
||||
assert result != None, error
|
||||
return result
|
||||
|
||||
|
||||
def get_peer_id():
|
||||
return run_aqua("get_peer_id", [])
|
||||
|
||||
|
||||
def test_create_resource():
|
||||
label = get_label()
|
||||
result = create_resource(label)
|
||||
peer_id = get_peer_id()
|
||||
resource_id = run_aqua("getResourceId", [label, peer_id])
|
||||
assert result == resource_id
|
||||
|
||||
|
||||
def test_get_resource():
|
||||
label = get_label()
|
||||
resource_id = create_resource(label)
|
||||
peer_id = get_peer_id()
|
||||
result, error = run_aqua("getResource", [resource_id])
|
||||
assert result != None, error
|
||||
assert result["id"] == resource_id, error
|
||||
assert result["owner_peer_id"] == peer_id, error
|
||||
assert result["label"] == label, error
|
||||
|
||||
|
||||
def test_register_record_unregister():
|
||||
relay = get_random_relay()
|
||||
label = get_label()
|
||||
value = "some_value"
|
||||
peer_id = get_peer_id()
|
||||
service_id = "id"
|
||||
|
||||
resource_id = create_resource(label)
|
||||
result, error = run_aqua(
|
||||
"registerService", [resource_id, value, peer_id, service_id], relay)
|
||||
assert result, error
|
||||
|
||||
# we want at least 1 successful response
|
||||
result, error = run_aqua("resolveResource", [resource_id, 1], relay)
|
||||
assert result != None, error
|
||||
|
||||
assert len(result) == 1, "records not found"
|
||||
|
||||
record = result[0]
|
||||
assert record["metadata"]["key_id"] == resource_id
|
||||
assert record["metadata"]["issued_by"] == peer_id
|
||||
assert record["metadata"]["peer_id"] == peer_id
|
||||
assert record["metadata"]["service_id"] == [service_id]
|
||||
|
||||
result, error = run_aqua("unregisterService", [resource_id, peer_id],
|
||||
relay)
|
||||
assert result, error
|
||||
|
||||
result, error = run_aqua("resolveResource", [resource_id, 2], relay)
|
||||
assert result != None, error
|
||||
assert len(result) == 0
|
||||
|
||||
|
||||
def test_register_unregister_remote_record():
|
||||
relay = get_random_relay()
|
||||
label = get_label()
|
||||
value = "some_value"
|
||||
issuer_peer_id = get_peer_id()
|
||||
peer_id = get_random_peer_id()
|
||||
service_id = "id"
|
||||
|
||||
resource_id = create_resource(label)
|
||||
result, error = run_aqua(
|
||||
"registerService", [resource_id, value, peer_id, service_id], relay)
|
||||
assert result, error
|
||||
|
||||
result, error = run_aqua("resolveResource", [resource_id, 2], relay)
|
||||
assert result != None, error
|
||||
|
||||
assert len(result) == 1, "records not found"
|
||||
|
||||
record = result[0]
|
||||
assert record["metadata"]["key_id"] == resource_id
|
||||
assert record["metadata"]["issued_by"] == issuer_peer_id
|
||||
assert record["metadata"]["peer_id"] == peer_id
|
||||
assert record["metadata"]["service_id"] == [service_id]
|
||||
|
||||
result, error = run_aqua("unregisterService", [resource_id, peer_id],
|
||||
relay)
|
||||
assert result, error
|
||||
|
||||
result, error = run_aqua("resolveResource", [resource_id, 2], relay)
|
||||
assert result != None, error
|
||||
assert len(result) == 0
|
7
aqua-tests/test_fluence_cli_version.py
Normal file
7
aqua-tests/test_fluence_cli_version.py
Normal file
@ -0,0 +1,7 @@
|
||||
import delegator
|
||||
|
||||
|
||||
def test_fluence_cli_version():
|
||||
c = delegator.run(f"fluence --version", block=True)
|
||||
print(f"Fluence CLI version: {c.out}")
|
||||
assert True
|
@ -1,5 +0,0 @@
|
||||
# Aqua scripts for AquaDHT
|
||||
Implementation of PubSub over AquaDHT service and low-level bindings for AquaDHT service.
|
||||
|
||||
## Documentation
|
||||
See [Aqua Book](https://fluence.dev/aqua-book/libraries/aqua-dht).
|
8
aqua/constants.aqua
Normal file
8
aqua/constants.aqua
Normal file
@ -0,0 +1,8 @@
|
||||
aqua Constants declares *
|
||||
|
||||
-- the number of peers to which data will be replicated during the API call
|
||||
const INITIAL_REPLICATION_FACTOR = 1
|
||||
-- the minimal number of peers requested to obtain the data.
|
||||
const CONSISTENCY_LEVEL = 1
|
||||
-- default timeout for waiting for results
|
||||
const DEFAULT_TIMEOUT = 6000
|
69
aqua/misc.aqua
Normal file
69
aqua/misc.aqua
Normal file
@ -0,0 +1,69 @@
|
||||
aqua Misc declares *
|
||||
|
||||
import "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
import "registry-service.aqua"
|
||||
import "constants.aqua"
|
||||
|
||||
alias ResourceId: string
|
||||
alias Resource: Key
|
||||
alias Error: string
|
||||
|
||||
func wait(successful: *bool, len: i16, timeout: u16) -> bool:
|
||||
status: *string
|
||||
waiting = (arr: *bool, s: *string):
|
||||
join arr[len - 1]
|
||||
s <<- "ok"
|
||||
|
||||
waiting(successful, status)
|
||||
par status <- Peer.timeout(timeout, "timeout")
|
||||
|
||||
result: *bool
|
||||
stat = status!
|
||||
if stat == "ok":
|
||||
result <<- true
|
||||
else:
|
||||
result <<- false
|
||||
|
||||
<- result!
|
||||
|
||||
-- Get peers closest to the resource_id's hash in Kademlia network
|
||||
-- These peers are expected to store list of providers for this key
|
||||
func getNeighbors(resource_id: ResourceId) -> []PeerId:
|
||||
k <- Op.string_to_b58(resource_id)
|
||||
nodes <- Kademlia.neighborhood(k, nil, nil)
|
||||
<- nodes
|
||||
|
||||
func appendErrors(error1: *Error, error2: *Error):
|
||||
for e <- error2:
|
||||
error1 <<- e
|
||||
|
||||
func getResourceHelper(resource_id: ResourceId) -> ?Resource, *Error:
|
||||
nodes <- getNeighbors(resource_id)
|
||||
result: *Resource
|
||||
error: *Error
|
||||
|
||||
resources: *Key
|
||||
successful: *bool
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
get_result <- Registry.get_key_metadata(resource_id)
|
||||
if get_result.success:
|
||||
resources <<- get_result.key
|
||||
successful <<- true
|
||||
else:
|
||||
e <- Op.concat_strings(get_result.error, " on ")
|
||||
error <- Op.concat_strings(e, n)
|
||||
|
||||
success <- wait(successful, CONSISTENCY_LEVEL, DEFAULT_TIMEOUT)
|
||||
if success == false:
|
||||
error <<- "resource not found: timeout exceeded"
|
||||
else:
|
||||
merge_result <- Registry.merge_keys(resources)
|
||||
|
||||
if merge_result.success:
|
||||
result <<- merge_result.key
|
||||
else:
|
||||
error <<- merge_result.error
|
||||
|
||||
<- result, error
|
8196
aqua/package-lock.json
generated
8196
aqua/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@ -1,18 +1,17 @@
|
||||
{
|
||||
"name": "@fluencelabs/registry",
|
||||
"version": "0.3.1",
|
||||
"version": "0.9.4",
|
||||
"description": "Aqua Registry library",
|
||||
"files": [
|
||||
"*.aqua"
|
||||
],
|
||||
"dependencies": {
|
||||
"@fluencelabs/aqua-lib": "^0.4.3",
|
||||
"@fluencelabs/trust-graph": "^3.0.1"
|
||||
"@fluencelabs/aqua-lib": "0.9.1",
|
||||
"@fluencelabs/trust-graph": "0.4.11"
|
||||
},
|
||||
"scripts": {
|
||||
"compile-aqua": "aqua -i . -o ./target/typescript",
|
||||
"generate-aqua": "../service/build.sh",
|
||||
"build": "npm run compile-aqua"
|
||||
"build": "fluence aqua -i . --dry"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
@ -28,8 +27,5 @@
|
||||
"bugs": {
|
||||
"url": "https://github.com/fluencelabs/registry/issues"
|
||||
},
|
||||
"homepage": "https://github.com/fluencelabs/registry",
|
||||
"devDependencies": {
|
||||
"@fluencelabs/aqua": "^0.7.0-285"
|
||||
}
|
||||
"homepage": "https://github.com/fluencelabs/registry"
|
||||
}
|
||||
|
@ -1,45 +1,80 @@
|
||||
import "registry.aqua"
|
||||
import PeerId, Peer, Sig from "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
aqua RegistryApi declares *
|
||||
|
||||
export getKeySignature, getRecordMetadata
|
||||
export getRecordSignature, getTombstoneSignature
|
||||
export registerKey, putRecord, addTombstone
|
||||
export getKeyMetadata, republishKey
|
||||
|
||||
import "registry-service.aqua"
|
||||
import PeerId, Peer, Sig, SignResult from "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
import "@fluencelabs/trust-graph/trust-graph.aqua"
|
||||
|
||||
func get_route_signature(label: string, timestamp_created: u64) -> []u8:
|
||||
on HOST_PEER_ID:
|
||||
bytes <- Registry.get_route_bytes(label, nil, timestamp_created, nil, "")
|
||||
signature <- Sig.sign(bytes)
|
||||
<- signature.signature!
|
||||
|
||||
func get_record_signature(route_id: string, value: string, relay_id: ?PeerId, service_id: ?string, timestamp_created: u64) -> []u8:
|
||||
on HOST_PEER_ID:
|
||||
bytes <- Registry.get_record_bytes(route_id, value, relay_id, service_id, timestamp_created, nil)
|
||||
signature <- Sig.sign(bytes)
|
||||
<- signature.signature!
|
||||
|
||||
func get_host_record_signature(route_id: string, value: string, relay_id: ?PeerId, service_id: ?string, timestamp_created: u64) -> []u8:
|
||||
on HOST_PEER_ID:
|
||||
bytes <- Registry.get_host_record_bytes(route_id, value, relay_id, service_id, timestamp_created, nil)
|
||||
signature <- Sig.sign(bytes)
|
||||
<- signature.signature!
|
||||
|
||||
func register_route(label: string, timestamp_created: u64, signature: []u8, pin: bool) -> RegisterRouteResult:
|
||||
t <- Peer.timestamp_sec()
|
||||
weight <- TrustGraph.get_weight(%init_peer_id%, t)
|
||||
result <- Registry.register_route(label, nil, timestamp_created, nil, "", signature, pin, weight, t)
|
||||
func getKeySignature(label: string, timestamp_created: u64) -> SignResult:
|
||||
bytes <- Registry.get_key_bytes(label, nil, timestamp_created, nil, "")
|
||||
on INIT_PEER_ID via HOST_PEER_ID:
|
||||
result <- Sig.sign(bytes)
|
||||
<- result
|
||||
|
||||
func put_record(route_id: string, value: string, relay_id: ?PeerId, service_id: []string, timestamp_created: u64, signature: []u8) -> DhtResult:
|
||||
func getRecordMetadata(key_id: string, value: string, peer_id: string, relay_id: []string, service_id: []string, solution: []u8) -> ?RecordMetadata, ?string:
|
||||
t <- Peer.timestamp_sec()
|
||||
weight <- TrustGraph.get_weight(%init_peer_id%, t)
|
||||
result <- Registry.put_record(route_id, value, relay_id, service_id, timestamp_created, nil, signature, weight, t)
|
||||
bytes <- Registry.get_record_metadata_bytes(key_id, INIT_PEER_ID, t, value, peer_id, relay_id, service_id, solution)
|
||||
|
||||
on INIT_PEER_ID via HOST_PEER_ID:
|
||||
sig_result <- Sig.sign(bytes)
|
||||
|
||||
result: *RecordMetadata
|
||||
error: *string
|
||||
if sig_result.success == true:
|
||||
result <- Registry.create_record_metadata(key_id, INIT_PEER_ID, t, value, peer_id, relay_id, service_id, solution, sig_result.signature!)
|
||||
else:
|
||||
error <<- sig_result.error!
|
||||
|
||||
<- result, error
|
||||
|
||||
func getRecordSignature(metadata: RecordMetadata, timestamp_created: u64) -> SignResult:
|
||||
signature: *SignResult
|
||||
|
||||
if metadata.peer_id != INIT_PEER_ID:
|
||||
on metadata.peer_id via HOST_PEER_ID:
|
||||
bytes <- Registry.get_record_bytes(metadata, timestamp_created)
|
||||
signature <- Sig.sign(bytes)
|
||||
else:
|
||||
on HOST_PEER_ID:
|
||||
bytess <- Registry.get_record_bytes(metadata, timestamp_created)
|
||||
on INIT_PEER_ID:
|
||||
signature <- Sig.sign(bytess)
|
||||
|
||||
<- signature!
|
||||
|
||||
func getTombstoneSignature(key_id: string, peer_id: string, timestamp_issued: u64, solution: []u8) -> SignResult:
|
||||
bytes <- Registry.get_tombstone_bytes(key_id, INIT_PEER_ID, peer_id, timestamp_issued, solution)
|
||||
on INIT_PEER_ID via HOST_PEER_ID:
|
||||
result <- Sig.sign(bytes)
|
||||
<- result
|
||||
|
||||
func put_host_record(route_id: string, value: string, relay_id: ?PeerId, service_id: []string, timestamp_created: u64, signature: []u8) -> PutHostRecordResult:
|
||||
func registerKey(label: string, timestamp_created: u64, signature: []u8) -> RegisterKeyResult:
|
||||
t <- Peer.timestamp_sec()
|
||||
weight <- TrustGraph.get_weight(%init_peer_id%, t)
|
||||
result <- Registry.put_host_record(route_id, value, relay_id, service_id, timestamp_created, nil, signature, weight, t)
|
||||
result <- Registry.register_key(label, nil, timestamp_created, nil, "", signature, weight, t)
|
||||
<- result
|
||||
|
||||
func propagate_host_record(res: PutHostRecordResult) -> DhtResult:
|
||||
func putRecord(metadata: RecordMetadata, timestamp_created: u64, signature: []u8) -> RegistryResult:
|
||||
t <- Peer.timestamp_sec()
|
||||
weight <- TrustGraph.get_weight(%init_peer_id%, t)
|
||||
result <- Registry.propagate_host_record(res, t, weight)
|
||||
weight <- TrustGraph.get_weight(metadata.issued_by, t)
|
||||
result <- Registry.put_record(metadata, timestamp_created, signature, weight, t)
|
||||
<- result
|
||||
|
||||
func addTombstone(key_id: string, peer_id: string, timestamp_issued: u64, solution: []u8, signature: []u8) -> RegistryResult:
|
||||
t <- Peer.timestamp_sec()
|
||||
result <- Registry.add_tombstone(key_id, INIT_PEER_ID, peer_id, timestamp_issued, solution, signature, t)
|
||||
<- result
|
||||
|
||||
func getKeyMetadata(key_id: string) -> GetKeyMetadataResult:
|
||||
result <- Registry.get_key_metadata(key_id)
|
||||
<- result
|
||||
|
||||
func republishKey(key: Key) -> RegistryResult:
|
||||
t <- Peer.timestamp_sec()
|
||||
weight <- TrustGraph.get_weight(key.owner_peer_id, t)
|
||||
result <- Registry.republish_key(key, weight, t)
|
||||
<- result
|
||||
|
@ -1,8 +1,9 @@
|
||||
module Registry.Scheduled declares *
|
||||
aqua Registry.Scheduled declares *
|
||||
|
||||
export clearExpired_86400, replicate_3600
|
||||
export clearExpired_86400, replicate_3600, renew_43200
|
||||
|
||||
import "registry.aqua"
|
||||
import "registry-service.aqua"
|
||||
import "registry-api.aqua"
|
||||
import "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
import "@fluencelabs/trust-graph/trust-graph.aqua"
|
||||
|
||||
@ -12,21 +13,30 @@ func clearExpired_86400():
|
||||
t <- Peer.timestamp_sec()
|
||||
Registry.clear_expired(t)
|
||||
|
||||
-- update stale local records
|
||||
func renew_43200():
|
||||
on HOST_PEER_ID:
|
||||
t <- Peer.timestamp_sec()
|
||||
res <- Registry.get_stale_local_records(t)
|
||||
for r <- res.result par:
|
||||
signature <- getRecordSignature(r.metadata, t)
|
||||
putRecord(r.metadata, t, signature.signature!)
|
||||
|
||||
-- get all old records and replicate it by routes
|
||||
func replicate_3600():
|
||||
on HOST_PEER_ID:
|
||||
t <- Peer.timestamp_sec()
|
||||
res <- Registry.evict_stale(t)
|
||||
for r <- res.results par:
|
||||
k <- Op.string_to_b58(r.route.id)
|
||||
k <- Op.string_to_b58(r.key.id)
|
||||
nodes <- Kademlia.neighborhood(k, nil, nil)
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
tt <- Peer.timestamp_sec()
|
||||
key_weight <- TrustGraph.get_weight(r.route.peer_id, tt)
|
||||
Registry.republish_route(r.route, key_weight, tt)
|
||||
key_weight <- TrustGraph.get_weight(r.key.owner_peer_id, tt)
|
||||
Registry.republish_key(r.key, key_weight, tt)
|
||||
|
||||
records_weights: *WeightResult
|
||||
for record <- r.records:
|
||||
records_weights <- TrustGraph.get_weight(record.peer_id, tt)
|
||||
records_weights <- TrustGraph.get_weight(record.metadata.issued_by, tt)
|
||||
Registry.republish_records(r.records, records_weights, tt)
|
||||
|
121
aqua/registry-service.aqua
Normal file
121
aqua/registry-service.aqua
Normal file
@ -0,0 +1,121 @@
|
||||
aqua Registry declares *
|
||||
|
||||
data ClearExpiredResult:
|
||||
success: bool
|
||||
error: string
|
||||
count_keys: u64
|
||||
count_records: u64
|
||||
count_tombstones: u64
|
||||
|
||||
data Key:
|
||||
id: string
|
||||
label: string
|
||||
owner_peer_id: string
|
||||
timestamp_created: u64
|
||||
challenge: []u8
|
||||
challenge_type: string
|
||||
signature: []u8
|
||||
|
||||
data RecordMetadata:
|
||||
key_id: string
|
||||
issued_by: string
|
||||
peer_id: string
|
||||
timestamp_issued: u64
|
||||
solution: []u8
|
||||
value: string
|
||||
relay_id: []string
|
||||
service_id: []string
|
||||
issuer_signature: []u8
|
||||
|
||||
data Record:
|
||||
metadata: RecordMetadata
|
||||
timestamp_created: u64
|
||||
signature: []u8
|
||||
|
||||
data Tombstone:
|
||||
key_id: string
|
||||
issued_by: string
|
||||
peer_id: string
|
||||
timestamp_issued: u64
|
||||
solution: []u8
|
||||
issuer_signature: []u8
|
||||
|
||||
data EvictStaleItem:
|
||||
key: Key
|
||||
records: []Record
|
||||
tombstones: []Tombstone
|
||||
|
||||
data EvictStaleResult:
|
||||
success: bool
|
||||
error: string
|
||||
results: []EvictStaleItem
|
||||
|
||||
data GetKeyMetadataResult:
|
||||
success: bool
|
||||
error: string
|
||||
key: Key
|
||||
|
||||
data GetRecordsResult:
|
||||
success: bool
|
||||
error: string
|
||||
result: []Record
|
||||
|
||||
data GetTombstonesResult:
|
||||
success: bool
|
||||
error: string
|
||||
result: []Tombstone
|
||||
|
||||
data MergeKeysResult:
|
||||
success: bool
|
||||
error: string
|
||||
key: Key
|
||||
|
||||
data MergeResult:
|
||||
success: bool
|
||||
error: string
|
||||
result: []Record
|
||||
|
||||
data RegisterKeyResult:
|
||||
success: bool
|
||||
error: string
|
||||
key_id: string
|
||||
|
||||
data RegistryResult:
|
||||
success: bool
|
||||
error: string
|
||||
|
||||
data RepublishRecordsResult:
|
||||
success: bool
|
||||
error: string
|
||||
updated: u64
|
||||
|
||||
data WeightResult:
|
||||
success: bool
|
||||
weight: u32
|
||||
peer_id: string
|
||||
error: string
|
||||
|
||||
service Registry("registry"):
|
||||
add_tombstone(key_id: string, issued_by: string, peer_id: string, timestamp_issued: u64, solution: []u8, signature: []u8, current_timestamp_sec: u64) -> RegistryResult
|
||||
clear_expired(current_timestamp_sec: u64) -> ClearExpiredResult
|
||||
create_record_metadata(key_id: string, issued_by: string, timestamp_issued: u64, value: string, peer_id: string, relay_id: []string, service_id: []string, solution: []u8, signature: []u8) -> RecordMetadata
|
||||
evict_stale(current_timestamp_sec: u64) -> EvictStaleResult
|
||||
get_key_bytes(label: string, owner_peer_id: []string, timestamp_created: u64, challenge: []u8, challenge_type: string) -> []u8
|
||||
get_key_id(label: string, peer_id: string) -> string
|
||||
get_key_metadata(key_id: string) -> GetKeyMetadataResult
|
||||
get_record_bytes(metadata: RecordMetadata, timestamp_created: u64) -> []u8
|
||||
get_record_metadata_bytes(key_id: string, issued_by: string, timestamp_issued: u64, value: string, peer_id: string, relay_id: []string, service_id: []string, solution: []u8) -> []u8
|
||||
get_records(key_id: string, current_timestamp_sec: u64) -> GetRecordsResult
|
||||
get_stale_local_records(current_timestamp_sec: u64) -> GetRecordsResult
|
||||
get_tombstone_bytes(key_id: string, issued_by: string, peer_id: string, timestamp_issued: u64, solution: []u8) -> []u8
|
||||
get_tombstones(key_id: string, current_timestamp_sec: u64) -> GetTombstonesResult
|
||||
merge(records: [][]Record) -> MergeResult
|
||||
merge_keys(keys: []Key) -> MergeKeysResult
|
||||
merge_two(a: []Record, b: []Record) -> MergeResult
|
||||
put_record(metadata: RecordMetadata, timestamp_created: u64, signature: []u8, weight: WeightResult, current_timestamp_sec: u64) -> RegistryResult
|
||||
register_key(label: string, owner_peer_id: []string, timestamp_created: u64, challenge: []u8, challenge_type: string, signature: []u8, weight: WeightResult, current_timestamp_sec: u64) -> RegisterKeyResult
|
||||
republish_key(key: Key, weight: WeightResult, current_timestamp_sec: u64) -> RegistryResult
|
||||
republish_records(records: []Record, weights: []WeightResult, current_timestamp_sec: u64) -> RepublishRecordsResult
|
||||
republish_tombstones(tombstones: []Tombstone, current_timestamp_sec: u64) -> RegistryResult
|
||||
set_expired_timeout(timeout_sec: u64)
|
||||
set_stale_timeout(timeout_sec: u64)
|
@ -1,97 +0,0 @@
|
||||
module Registry declares *
|
||||
|
||||
data ClearExpiredResult:
|
||||
success: bool
|
||||
error: string
|
||||
count_routes: u64
|
||||
count_values: u64
|
||||
|
||||
data DhtResult:
|
||||
success: bool
|
||||
error: string
|
||||
|
||||
data Route:
|
||||
id: string
|
||||
label: string
|
||||
peer_id: string
|
||||
timestamp_created: u64
|
||||
challenge: []u8
|
||||
challenge_type: string
|
||||
signature: []u8
|
||||
|
||||
data Record:
|
||||
route_id: string
|
||||
value: string
|
||||
peer_id: string
|
||||
set_by: string
|
||||
relay_id: []string
|
||||
service_id: []string
|
||||
timestamp_created: u64
|
||||
solution: []u8
|
||||
signature: []u8
|
||||
|
||||
data EvictStaleItem:
|
||||
route: Route
|
||||
records: []Record
|
||||
|
||||
data EvictStaleResult:
|
||||
success: bool
|
||||
error: string
|
||||
results: []EvictStaleItem
|
||||
|
||||
data GetRouteMetadataResult:
|
||||
success: bool
|
||||
error: string
|
||||
route: Route
|
||||
|
||||
data GetValuesResult:
|
||||
success: bool
|
||||
error: string
|
||||
result: []Record
|
||||
|
||||
data MergeResult:
|
||||
success: bool
|
||||
error: string
|
||||
result: []Record
|
||||
|
||||
data PutHostRecordResult:
|
||||
success: bool
|
||||
error: string
|
||||
value: []Record
|
||||
|
||||
data RegisterRouteResult:
|
||||
success: bool
|
||||
error: string
|
||||
route_id: string
|
||||
|
||||
data RepublishValuesResult:
|
||||
success: bool
|
||||
error: string
|
||||
updated: u64
|
||||
|
||||
data WeightResult:
|
||||
success: bool
|
||||
weight: u32
|
||||
peer_id: string
|
||||
error: string
|
||||
|
||||
service Registry("registry"):
|
||||
clear_expired(current_timestamp_sec: u64) -> ClearExpiredResult
|
||||
clear_host_record(route_id: string, current_timestamp_sec: u64) -> DhtResult
|
||||
evict_stale(current_timestamp_sec: u64) -> EvictStaleResult
|
||||
get_host_record_bytes(route_id: string, value: string, relay_id: []string, service_id: []string, timestamp_created: u64, solution: []u8) -> []u8
|
||||
get_record_bytes(route_id: string, value: string, relay_id: []string, service_id: []string, timestamp_created: u64, solution: []u8) -> []u8
|
||||
get_records(route_id: string, current_timestamp_sec: u64) -> GetValuesResult
|
||||
get_route_bytes(label: string, peer_id: []string, timestamp_created: u64, challenge: []u8, challenge_type: string) -> []u8
|
||||
get_route_id(label: string, peer_id: string) -> string
|
||||
get_route_metadata(route_id: string, current_timestamp_sec: u64) -> GetRouteMetadataResult
|
||||
merge(records: [][]Record) -> MergeResult
|
||||
merge_two(a: []Record, b: []Record) -> MergeResult
|
||||
propagate_host_record(set_host_value: PutHostRecordResult, current_timestamp_sec: u64, weight: WeightResult) -> DhtResult
|
||||
put_host_record(route_id: string, value: string, relay_id: []string, service_id: []string, timestamp_created: u64, solution: []u8, signature: []u8, weight: WeightResult, current_timestamp_sec: u64) -> PutHostRecordResult
|
||||
put_record(route_id: string, value: string, relay_id: []string, service_id: []string, timestamp_created: u64, solution: []u8, signature: []u8, weight: WeightResult, current_timestamp_sec: u64) -> DhtResult
|
||||
register_route(label: string, peer_id: []string, timestamp_created: u64, challenge: []u8, challenge_type: string, signature: []u8, pin: bool, weight: WeightResult, current_timestamp_sec: u64) -> RegisterRouteResult
|
||||
republish_records(records: []Record, weights: []WeightResult, current_timestamp_sec: u64) -> RepublishValuesResult
|
||||
republish_route(route: Route, weight: WeightResult, current_timestamp_sec: u64) -> DhtResult
|
||||
set_expired_timeout(timeout_sec: u64)
|
||||
set_stale_timeout(timeout_sec: u64)
|
206
aqua/resources-api.aqua
Normal file
206
aqua/resources-api.aqua
Normal file
@ -0,0 +1,206 @@
|
||||
aqua Registry.ResourcesAPI declares *
|
||||
|
||||
import "registry-service.aqua"
|
||||
import "registry-api.aqua"
|
||||
import "misc.aqua"
|
||||
import "constants.aqua"
|
||||
import "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
|
||||
func getResource(resource_id: ResourceId) -> ?Resource, *Error:
|
||||
on HOST_PEER_ID:
|
||||
result, error <- getResourceHelper(resource_id)
|
||||
<- result, error
|
||||
|
||||
func getResourceId(label: string, peer_id: string) -> ResourceId:
|
||||
on HOST_PEER_ID:
|
||||
resource_id <- Registry.get_key_id(label, peer_id)
|
||||
<- resource_id
|
||||
|
||||
-- Create a resource: register it on the closest peers
|
||||
func createResource(label: string) -> ?ResourceId, *Error:
|
||||
t <- Peer.timestamp_sec()
|
||||
|
||||
resource_id: *ResourceId
|
||||
error: *Error
|
||||
on HOST_PEER_ID:
|
||||
sig_result <- getKeySignature(label, t)
|
||||
if sig_result.success == false:
|
||||
error <<- sig_result.error!
|
||||
else:
|
||||
signature = sig_result.signature!
|
||||
id <- Registry.get_key_id(label, INIT_PEER_ID)
|
||||
nodes <- getNeighbors(id)
|
||||
|
||||
successful: *bool
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
res <- registerKey(label, t, signature)
|
||||
|
||||
if res.success:
|
||||
successful <<- true
|
||||
else:
|
||||
error <<- res.error
|
||||
|
||||
success <- wait(successful, INITIAL_REPLICATION_FACTOR, DEFAULT_TIMEOUT)
|
||||
|
||||
if success == false:
|
||||
error <<- "resource wasn't created: timeout exceeded"
|
||||
else:
|
||||
resource_id <<- id
|
||||
|
||||
<- resource_id, error
|
||||
|
||||
-- Note: resource must be already created
|
||||
func registerService(resource_id: ResourceId, value: string, peer_id: PeerId, service_id: ?string) -> bool, *Error:
|
||||
relay_id: *string
|
||||
if peer_id == INIT_PEER_ID:
|
||||
relay_id <<- HOST_PEER_ID
|
||||
|
||||
success: *bool
|
||||
error: *Error
|
||||
|
||||
on HOST_PEER_ID:
|
||||
metadata, err <- getRecordMetadata(resource_id, value, peer_id, relay_id, service_id, nil)
|
||||
if metadata == nil:
|
||||
success <<- false
|
||||
error <<- err!
|
||||
else:
|
||||
t <- Peer.timestamp_sec()
|
||||
sig_result = getRecordSignature(metadata!, t)
|
||||
if sig_result.success == false:
|
||||
error <<- sig_result.error!
|
||||
success <<- false
|
||||
else:
|
||||
key, error_get <- getResourceHelper(resource_id)
|
||||
if key == nil:
|
||||
appendErrors(error, error_get)
|
||||
success <<- false
|
||||
else:
|
||||
if peer_id != INIT_PEER_ID:
|
||||
on peer_id via HOST_PEER_ID:
|
||||
republish_result <- republishKey(key!)
|
||||
if republish_result.success == false:
|
||||
error <<- republish_result.error
|
||||
else:
|
||||
p_res <- putRecord(metadata!, t, sig_result.signature!)
|
||||
if p_res.success == false:
|
||||
error <<- p_res.error
|
||||
success <<- false
|
||||
|
||||
nodes <- getNeighbors(resource_id)
|
||||
successful: *bool
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
republish_res <- republishKey(key!)
|
||||
if republish_res.success == false:
|
||||
error <<- republish_res.error
|
||||
else:
|
||||
put_res <- putRecord(metadata!, t, sig_result.signature!)
|
||||
if put_res.success:
|
||||
successful <<- true
|
||||
else:
|
||||
error <<- put_res.error
|
||||
success <- wait(successful, INITIAL_REPLICATION_FACTOR, DEFAULT_TIMEOUT)
|
||||
|
||||
succ = success!
|
||||
if succ == false:
|
||||
error <<- "service hasn't registered: timeout exceeded"
|
||||
|
||||
<- succ, error
|
||||
|
||||
|
||||
func unregisterService(resource_id: ResourceId, peer_id: PeerId) -> bool, *Error:
|
||||
success: *bool
|
||||
error: *Error
|
||||
|
||||
on HOST_PEER_ID:
|
||||
t <- Peer.timestamp_sec()
|
||||
sig_result = getTombstoneSignature(resource_id, peer_id, t, nil)
|
||||
if sig_result.success == false:
|
||||
error <<- sig_result.error!
|
||||
success <<- false
|
||||
else:
|
||||
key, error_get <- getResourceHelper(resource_id)
|
||||
if key == nil:
|
||||
appendErrors(error, error_get)
|
||||
success <<- false
|
||||
else:
|
||||
|
||||
if peer_id != INIT_PEER_ID:
|
||||
on peer_id:
|
||||
republish_result <- republishKey(key!)
|
||||
if republish_result.success == false:
|
||||
error <<- republish_result.error
|
||||
else:
|
||||
res <- addTombstone(resource_id, peer_id, t, nil, sig_result.signature!)
|
||||
if res.success == false:
|
||||
error <<- res.error
|
||||
success <<- false
|
||||
|
||||
nodes <- getNeighbors(resource_id)
|
||||
successful: *bool
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
republish_res <- republishKey(key!)
|
||||
if republish_res.success == false:
|
||||
error <<- republish_res.error
|
||||
else:
|
||||
add_res <- addTombstone(resource_id, peer_id, t, nil, sig_result.signature!)
|
||||
if add_res.success:
|
||||
successful <<- true
|
||||
else:
|
||||
error <<- add_res.error
|
||||
success <- wait(successful, INITIAL_REPLICATION_FACTOR, DEFAULT_TIMEOUT)
|
||||
|
||||
succ = success!
|
||||
if succ == false:
|
||||
error <<- "unregisterService failed: timeout exceeded"
|
||||
|
||||
<- succ, error
|
||||
|
||||
func resolveResource(resource_id: ResourceId, ack: i16) -> ?[]Record, *Error:
|
||||
on HOST_PEER_ID:
|
||||
nodes <- getNeighbors(resource_id)
|
||||
result: *[]Record
|
||||
records: *[]Record
|
||||
error: *Error
|
||||
successful: *bool
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
t <- Peer.timestamp_sec()
|
||||
get_result <- Registry.get_records(resource_id, t)
|
||||
if get_result.success:
|
||||
records <<- get_result.result
|
||||
successful <<- true
|
||||
else:
|
||||
error <<- get_result.error
|
||||
|
||||
success <- wait(successful, ack, DEFAULT_TIMEOUT)
|
||||
if success == false:
|
||||
error <<- "timeout exceeded"
|
||||
else:
|
||||
merged <- Registry.merge(records)
|
||||
if merged.success == false:
|
||||
error <<- merged.error
|
||||
else:
|
||||
result <<- merged.result
|
||||
<- result, error
|
||||
|
||||
-- Execute the given call on providers
|
||||
-- Note that you can provide another Aqua function as an argument to this one
|
||||
func executeOnResource(resource_id: ResourceId, ack: i16, call: Record -> ()) -> bool, *Error:
|
||||
success: *bool
|
||||
result, error <- resolveResource(resource_id, ack)
|
||||
|
||||
if result == nil:
|
||||
success <<- false
|
||||
else:
|
||||
for r <- result! par:
|
||||
on r.metadata.peer_id via r.metadata.relay_id:
|
||||
call(r)
|
||||
success <<- true
|
||||
<- success!, error
|
@ -1,174 +0,0 @@
|
||||
module Registry.Routing declares *
|
||||
|
||||
import "registry.aqua"
|
||||
import "registry-api.aqua"
|
||||
import "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
|
||||
alias RouteId: string
|
||||
|
||||
func get_route_id(label: string, peer_id: string) -> RouteId:
|
||||
route_id <- Registry.get_route_id(label, peer_id)
|
||||
<- route_id
|
||||
|
||||
-- Get peers closest to the label's hash in Kademlia network
|
||||
-- These peers are expected to store list of subscribers of this label
|
||||
func getNeighbours(route_id: string) -> []PeerId:
|
||||
k <- Op.string_to_b58(route_id)
|
||||
nodes <- Kademlia.neighborhood(k, nil, nil)
|
||||
<- nodes
|
||||
|
||||
-- If this peer have set node_id as a subscriber for label,
|
||||
-- this call will prevent subscriber from re-subscribing
|
||||
-- so that eventually it will disappear from the subscribers list
|
||||
func removeFromRoute(route_id: string):
|
||||
on HOST_PEER_ID:
|
||||
t <- Peer.timestamp_sec()
|
||||
Registry.clear_host_record(route_id, t)
|
||||
|
||||
-- Create a route: register it on the closest peers
|
||||
func createRoute(label: string) -> RouteId:
|
||||
t <- Peer.timestamp_sec()
|
||||
signature <- get_route_signature(label, t)
|
||||
|
||||
on HOST_PEER_ID:
|
||||
route_id <- get_route_id(label, INIT_PEER_ID)
|
||||
nodes <- getNeighbours(route_id)
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
result <- register_route(label, t, signature, false)
|
||||
<- route_id
|
||||
|
||||
-- Create a label and subscribe to it
|
||||
-- INIT_PEER_ID (current client) will become a subscriber
|
||||
func createRouteAndRegister(label: string, value: string, service_id: ?string) -> string:
|
||||
relay_id: ?string
|
||||
relay_id <<- HOST_PEER_ID
|
||||
|
||||
t <- Peer.timestamp_sec()
|
||||
route_signature <- get_route_signature(label, t)
|
||||
on HOST_PEER_ID:
|
||||
route_id <- get_route_id(label, INIT_PEER_ID)
|
||||
record_signature <- get_record_signature(route_id, value, relay_id, service_id, t)
|
||||
|
||||
on HOST_PEER_ID:
|
||||
|
||||
nodes <- getNeighbours(route_id)
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
register_route(label, t, route_signature, false)
|
||||
put_record(route_id, value, relay_id, service_id, t, record_signature)
|
||||
<- route_id
|
||||
|
||||
-- Create a label and subscribe to it
|
||||
-- INIT_PEER_ID (current client) will become a subscriber
|
||||
-- In contrast with non-blocking version, waits for at least a single write to succeed
|
||||
func createRouteAndRegisterBlocking(
|
||||
label: string, value: string,
|
||||
service_id: ?string,
|
||||
progress: string -> (),
|
||||
ack: i16
|
||||
) -> string:
|
||||
relay_id: ?string
|
||||
relay_id <<- HOST_PEER_ID
|
||||
|
||||
t <- Peer.timestamp_sec()
|
||||
route_signature <- get_route_signature(label, t)
|
||||
on HOST_PEER_ID:
|
||||
route_id <- get_route_id(label, INIT_PEER_ID)
|
||||
record_signature <- get_record_signature(route_id, value, relay_id, service_id, t)
|
||||
|
||||
results: *DhtResult
|
||||
on HOST_PEER_ID:
|
||||
nodes <- getNeighbours(route_id)
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
res1 <- register_route(label, t, route_signature, false)
|
||||
result <- put_record(route_id, value, relay_id, service_id, t, record_signature)
|
||||
if result.success:
|
||||
results <<- result
|
||||
progress(n)
|
||||
join results[ack]
|
||||
<- route_id
|
||||
|
||||
-- Create a label and make the given node a subscriber to it
|
||||
func createRouteAndRegisterNode(subscriber_node_id: PeerId, label: string, value: string, service_id: ?string) -> string:
|
||||
t <- Peer.timestamp_sec()
|
||||
route_signature <- get_route_signature(label, t)
|
||||
on HOST_PEER_ID:
|
||||
route_id <- get_route_id(label, INIT_PEER_ID)
|
||||
|
||||
record_signature <- get_host_record_signature(route_id, value, nil, service_id, t)
|
||||
|
||||
on subscriber_node_id:
|
||||
register_route(label, t, route_signature, false)
|
||||
r <- put_host_record(route_id, value, nil, service_id, t, record_signature)
|
||||
nodes <- getNeighbours(route_id)
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
register_route(label, t, route_signature, false)
|
||||
propagate_host_record(r)
|
||||
<- route_id
|
||||
|
||||
-- Subscribe to a label
|
||||
-- Note: label must be already initiated
|
||||
func registerForRoute(route_id: string, value: string, service_id: ?string):
|
||||
relay_id: ?string
|
||||
relay_id <<- HOST_PEER_ID
|
||||
|
||||
t <- Peer.timestamp_sec()
|
||||
record_signature <- get_record_signature(route_id, value, relay_id, service_id, t)
|
||||
|
||||
on HOST_PEER_ID:
|
||||
nodes <- getNeighbours(route_id)
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
put_record(route_id, value, relay_id, service_id, t, record_signature)
|
||||
|
||||
|
||||
-- Subscribe a node to the given label
|
||||
-- Note: label must be already initiated
|
||||
func registerForRouteNode(subscriber_node_id: PeerId, label: string, value: string, service_id: ?string):
|
||||
t <- Peer.timestamp_sec()
|
||||
route_signature <- get_route_signature(label, t)
|
||||
on HOST_PEER_ID:
|
||||
route_id <- get_route_id(label, INIT_PEER_ID)
|
||||
record_signature <- get_host_record_signature(route_id, value, nil, service_id, t)
|
||||
|
||||
on subscriber_node_id:
|
||||
r <- put_host_record(route_id, value, nil, service_id, t, record_signature)
|
||||
nodes <- getNeighbours(route_id)
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
register_route(label, t, route_signature, false)
|
||||
propagate_host_record(r)
|
||||
|
||||
-- Find the list of record for the given route_id
|
||||
func resolveRoute(route_id: string, ack: i16) -> []Record:
|
||||
on HOST_PEER_ID:
|
||||
nodes <- getNeighbours(route_id)
|
||||
res: *[]Record
|
||||
for n <- nodes par:
|
||||
on n:
|
||||
try:
|
||||
t <- Peer.timestamp_sec()
|
||||
get_result <- Registry.get_records(route_id, t)
|
||||
res <<- get_result.result
|
||||
|
||||
join res[ack]
|
||||
--par Peer.timeout(100000000, "timeout")
|
||||
result <- Registry.merge(res)
|
||||
<- result.result
|
||||
|
||||
-- Execute the given code on subscribers
|
||||
-- Note that you can provide another Aqua function as an argument to this one
|
||||
func executeOnRoute(route_id: string, ack: i16, call: Record -> ()):
|
||||
subs <- resolveRoute(route_id, ack)
|
||||
for r <- subs par:
|
||||
on r.peer_id via r.relay_id:
|
||||
call(r)
|
744
aqua/target/typescript/misc.ts
Normal file
744
aqua/target/typescript/misc.ts
Normal file
@ -0,0 +1,744 @@
|
||||
/* eslint-disable */
|
||||
// @ts-nocheck
|
||||
/**
|
||||
*
|
||||
* This file is auto-generated. Do not edit manually: changes may be erased.
|
||||
* Generated by Aqua compiler: https://github.com/fluencelabs/aqua/.
|
||||
* If you find any bugs, please write an issue on GitHub: https://github.com/fluencelabs/aqua/issues
|
||||
* Aqua version: 0.11.9-release-please-1c9388a-1275-1
|
||||
*
|
||||
*/
|
||||
import type { IFluenceClient as IFluenceClient$$, CallParams as CallParams$$ } from '@fluencelabs/js-client.api';
|
||||
import {
|
||||
v5_callFunction as callFunction$$,
|
||||
v5_registerService as registerService$$,
|
||||
} from '@fluencelabs/js-client.api';
|
||||
|
||||
|
||||
|
||||
// Services
|
||||
|
||||
// Functions
|
||||
export const getResourceHelper_script = `
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("getDataSrv" "-relay-") [] -relay-)
|
||||
(call %init_peer_id% ("getDataSrv" "resource_id") [] resource_id)
|
||||
)
|
||||
(xor
|
||||
(new $resources
|
||||
(new $successful
|
||||
(new $result
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("op" "string_to_b58") [resource_id] k)
|
||||
(call %init_peer_id% ("kad" "neighborhood") [k [] []] nodes)
|
||||
)
|
||||
(par
|
||||
(fold nodes n-0
|
||||
(par
|
||||
(xor
|
||||
(seq
|
||||
(new $-ephemeral-stream-
|
||||
(new #-ephemeral-canon-
|
||||
(canon -relay- $-ephemeral-stream- #-ephemeral-canon-)
|
||||
)
|
||||
)
|
||||
(xor
|
||||
(seq
|
||||
(call n-0 ("registry" "get_key_metadata") [resource_id] get_result)
|
||||
(xor
|
||||
(seq
|
||||
(seq
|
||||
(match get_result.$.success true
|
||||
(seq
|
||||
(ap get_result.$.key $resources)
|
||||
(ap true $successful)
|
||||
)
|
||||
)
|
||||
(new $-ephemeral-stream-
|
||||
(new #-ephemeral-canon-
|
||||
(canon -relay- $-ephemeral-stream- #-ephemeral-canon-)
|
||||
)
|
||||
)
|
||||
)
|
||||
(new $-ephemeral-stream-
|
||||
(new #-ephemeral-canon-
|
||||
(canon %init_peer_id% $-ephemeral-stream- #-ephemeral-canon-)
|
||||
)
|
||||
)
|
||||
)
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call n-0 ("op" "concat_strings") [get_result.$.error " on "] e)
|
||||
(call n-0 ("op" "concat_strings") [e n-0] $error)
|
||||
)
|
||||
(new $-ephemeral-stream-
|
||||
(new #-ephemeral-canon-
|
||||
(canon -relay- $-ephemeral-stream- #-ephemeral-canon-)
|
||||
)
|
||||
)
|
||||
)
|
||||
(new $-ephemeral-stream-
|
||||
(new #-ephemeral-canon-
|
||||
(canon %init_peer_id% $-ephemeral-stream- #-ephemeral-canon-)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
(null)
|
||||
)
|
||||
)
|
||||
(seq
|
||||
(seq
|
||||
(new $-ephemeral-stream-
|
||||
(new #-ephemeral-canon-
|
||||
(canon -relay- $-ephemeral-stream- #-ephemeral-canon-)
|
||||
)
|
||||
)
|
||||
(new $-ephemeral-stream-
|
||||
(new #-ephemeral-canon-
|
||||
(canon %init_peer_id% $-ephemeral-stream- #-ephemeral-canon-)
|
||||
)
|
||||
)
|
||||
)
|
||||
(fail %last_error%)
|
||||
)
|
||||
)
|
||||
(next n-0)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
(null)
|
||||
)
|
||||
)
|
||||
(new $status
|
||||
(new $result-0
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(par
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("math" "sub") [1 1] sub)
|
||||
(new $successful_test
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("math" "add") [sub 1] successful_incr)
|
||||
(fold $successful successful_fold_var
|
||||
(seq
|
||||
(seq
|
||||
(ap successful_fold_var $successful_test)
|
||||
(canon %init_peer_id% $successful_test #successful_iter_canon)
|
||||
)
|
||||
(xor
|
||||
(match #successful_iter_canon.length successful_incr
|
||||
(null)
|
||||
)
|
||||
(next successful_fold_var)
|
||||
)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
)
|
||||
(canon %init_peer_id% $successful_test #successful_result_canon)
|
||||
)
|
||||
(ap #successful_result_canon successful_gate)
|
||||
)
|
||||
)
|
||||
)
|
||||
(call %init_peer_id% ("math" "sub") [1 1] sub-0)
|
||||
)
|
||||
(ap "ok" $status)
|
||||
)
|
||||
(call %init_peer_id% ("peer" "timeout") [6000 "timeout"] $status)
|
||||
)
|
||||
(new $status_test
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("math" "add") [0 1] status_incr)
|
||||
(fold $status status_fold_var
|
||||
(seq
|
||||
(seq
|
||||
(ap status_fold_var $status_test)
|
||||
(canon %init_peer_id% $status_test #status_iter_canon)
|
||||
)
|
||||
(xor
|
||||
(match #status_iter_canon.length status_incr
|
||||
(null)
|
||||
)
|
||||
(next status_fold_var)
|
||||
)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
)
|
||||
(canon %init_peer_id% $status_test #status_result_canon)
|
||||
)
|
||||
(ap #status_result_canon status_gate)
|
||||
)
|
||||
)
|
||||
)
|
||||
(xor
|
||||
(match status_gate.$.[0] "ok"
|
||||
(ap true $result-0)
|
||||
)
|
||||
(ap false $result-0)
|
||||
)
|
||||
)
|
||||
(new $result-0_test
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("math" "add") [0 1] result-0_incr)
|
||||
(fold $result-0 result-0_fold_var
|
||||
(seq
|
||||
(seq
|
||||
(ap result-0_fold_var $result-0_test)
|
||||
(canon %init_peer_id% $result-0_test #result-0_iter_canon)
|
||||
)
|
||||
(xor
|
||||
(match #result-0_iter_canon.length result-0_incr
|
||||
(null)
|
||||
)
|
||||
(next result-0_fold_var)
|
||||
)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
)
|
||||
(canon %init_peer_id% $result-0_test #result-0_result_canon)
|
||||
)
|
||||
(ap #result-0_result_canon result-0_gate)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
(xor
|
||||
(match result-0_gate.$.[0] false
|
||||
(ap "resource not found: timeout exceeded" $error)
|
||||
)
|
||||
(seq
|
||||
(seq
|
||||
(canon %init_peer_id% $resources #resources_canon)
|
||||
(call %init_peer_id% ("registry" "merge_keys") [#resources_canon] merge_result)
|
||||
)
|
||||
(xor
|
||||
(match merge_result.$.success true
|
||||
(ap merge_result.$.key $result)
|
||||
)
|
||||
(ap merge_result.$.error $error)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
(canon %init_peer_id% $result #-result-fix-0)
|
||||
)
|
||||
(ap #-result-fix-0 -result-flat-0)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
(call %init_peer_id% ("errorHandlingSrv" "error") [%last_error% 0])
|
||||
)
|
||||
)
|
||||
(canon %init_peer_id% $error #error_canon)
|
||||
)
|
||||
(call %init_peer_id% ("callbackSrv" "response") [-result-flat-0 #error_canon])
|
||||
)
|
||||
`
|
||||
|
||||
export type GetResourceHelperResult = [{ challenge: number[]; challenge_type: string; id: string; label: string; owner_peer_id: string; signature: number[]; timestamp_created: number; } | null, string[]]
|
||||
export function getResourceHelper(
|
||||
resource_id: string,
|
||||
config?: {ttl?: number}
|
||||
): Promise<GetResourceHelperResult>;
|
||||
|
||||
export function getResourceHelper(
|
||||
peer: IFluenceClient$$,
|
||||
resource_id: string,
|
||||
config?: {ttl?: number}
|
||||
): Promise<GetResourceHelperResult>;
|
||||
|
||||
export function getResourceHelper(...args: any) {
|
||||
|
||||
|
||||
return callFunction$$(
|
||||
args,
|
||||
{
|
||||
"functionName" : "getResourceHelper",
|
||||
"arrow" : {
|
||||
"tag" : "arrow",
|
||||
"domain" : {
|
||||
"tag" : "labeledProduct",
|
||||
"fields" : {
|
||||
"resource_id" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codomain" : {
|
||||
"tag" : "unlabeledProduct",
|
||||
"items" : [
|
||||
{
|
||||
"tag" : "option",
|
||||
"type" : {
|
||||
"tag" : "struct",
|
||||
"name" : "Key",
|
||||
"fields" : {
|
||||
"challenge" : {
|
||||
"tag" : "array",
|
||||
"type" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "u8"
|
||||
}
|
||||
},
|
||||
"label" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "string"
|
||||
},
|
||||
"signature" : {
|
||||
"tag" : "array",
|
||||
"type" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "u8"
|
||||
}
|
||||
},
|
||||
"id" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "string"
|
||||
},
|
||||
"owner_peer_id" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "string"
|
||||
},
|
||||
"challenge_type" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "string"
|
||||
},
|
||||
"timestamp_created" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "u64"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"tag" : "array",
|
||||
"type" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "string"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"names" : {
|
||||
"relay" : "-relay-",
|
||||
"getDataSrv" : "getDataSrv",
|
||||
"callbackSrv" : "callbackSrv",
|
||||
"responseSrv" : "callbackSrv",
|
||||
"responseFnName" : "response",
|
||||
"errorHandlingSrv" : "errorHandlingSrv",
|
||||
"errorFnName" : "error"
|
||||
}
|
||||
},
|
||||
getResourceHelper_script
|
||||
)
|
||||
}
|
||||
|
||||
export const appendErrors_script = `
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("getDataSrv" "-relay-") [] -relay-)
|
||||
(call %init_peer_id% ("getDataSrv" "error1") [] error1-iter)
|
||||
)
|
||||
(fold error1-iter error1-item-0
|
||||
(seq
|
||||
(ap error1-item-0 $error1)
|
||||
(next error1-item-0)
|
||||
)
|
||||
)
|
||||
)
|
||||
(call %init_peer_id% ("getDataSrv" "error2") [] error2-iter)
|
||||
)
|
||||
(fold error2-iter error2-item-0
|
||||
(seq
|
||||
(ap error2-item-0 $error2)
|
||||
(next error2-item-0)
|
||||
)
|
||||
)
|
||||
)
|
||||
(xor
|
||||
(seq
|
||||
(canon %init_peer_id% $error2 #error2_canon)
|
||||
(fold #error2_canon e-0
|
||||
(seq
|
||||
(ap e-0 $error1-0)
|
||||
(next e-0)
|
||||
)
|
||||
)
|
||||
)
|
||||
(call %init_peer_id% ("errorHandlingSrv" "error") [%last_error% 0])
|
||||
)
|
||||
)
|
||||
`
|
||||
|
||||
|
||||
export function appendErrors(
|
||||
error1: string[],
|
||||
error2: string[],
|
||||
config?: {ttl?: number}
|
||||
): Promise<void>;
|
||||
|
||||
export function appendErrors(
|
||||
peer: IFluenceClient$$,
|
||||
error1: string[],
|
||||
error2: string[],
|
||||
config?: {ttl?: number}
|
||||
): Promise<void>;
|
||||
|
||||
export function appendErrors(...args: any) {
|
||||
|
||||
|
||||
return callFunction$$(
|
||||
args,
|
||||
{
|
||||
"functionName" : "appendErrors",
|
||||
"arrow" : {
|
||||
"tag" : "arrow",
|
||||
"domain" : {
|
||||
"tag" : "labeledProduct",
|
||||
"fields" : {
|
||||
"error1" : {
|
||||
"tag" : "array",
|
||||
"type" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "string"
|
||||
}
|
||||
},
|
||||
"error2" : {
|
||||
"tag" : "array",
|
||||
"type" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"codomain" : {
|
||||
"tag" : "nil"
|
||||
}
|
||||
},
|
||||
"names" : {
|
||||
"relay" : "-relay-",
|
||||
"getDataSrv" : "getDataSrv",
|
||||
"callbackSrv" : "callbackSrv",
|
||||
"responseSrv" : "callbackSrv",
|
||||
"responseFnName" : "response",
|
||||
"errorHandlingSrv" : "errorHandlingSrv",
|
||||
"errorFnName" : "error"
|
||||
}
|
||||
},
|
||||
appendErrors_script
|
||||
)
|
||||
}
|
||||
|
||||
export const getNeighbors_script = `
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("getDataSrv" "-relay-") [] -relay-)
|
||||
(call %init_peer_id% ("getDataSrv" "resource_id") [] resource_id)
|
||||
)
|
||||
(xor
|
||||
(seq
|
||||
(call %init_peer_id% ("op" "string_to_b58") [resource_id] k)
|
||||
(call %init_peer_id% ("kad" "neighborhood") [k [] []] nodes)
|
||||
)
|
||||
(call %init_peer_id% ("errorHandlingSrv" "error") [%last_error% 0])
|
||||
)
|
||||
)
|
||||
(call %init_peer_id% ("callbackSrv" "response") [nodes])
|
||||
)
|
||||
`
|
||||
|
||||
|
||||
export function getNeighbors(
|
||||
resource_id: string,
|
||||
config?: {ttl?: number}
|
||||
): Promise<string[]>;
|
||||
|
||||
export function getNeighbors(
|
||||
peer: IFluenceClient$$,
|
||||
resource_id: string,
|
||||
config?: {ttl?: number}
|
||||
): Promise<string[]>;
|
||||
|
||||
export function getNeighbors(...args: any) {
|
||||
|
||||
|
||||
return callFunction$$(
|
||||
args,
|
||||
{
|
||||
"functionName" : "getNeighbors",
|
||||
"arrow" : {
|
||||
"tag" : "arrow",
|
||||
"domain" : {
|
||||
"tag" : "labeledProduct",
|
||||
"fields" : {
|
||||
"resource_id" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codomain" : {
|
||||
"tag" : "unlabeledProduct",
|
||||
"items" : [
|
||||
{
|
||||
"tag" : "array",
|
||||
"type" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "string"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"names" : {
|
||||
"relay" : "-relay-",
|
||||
"getDataSrv" : "getDataSrv",
|
||||
"callbackSrv" : "callbackSrv",
|
||||
"responseSrv" : "callbackSrv",
|
||||
"responseFnName" : "response",
|
||||
"errorHandlingSrv" : "errorHandlingSrv",
|
||||
"errorFnName" : "error"
|
||||
}
|
||||
},
|
||||
getNeighbors_script
|
||||
)
|
||||
}
|
||||
|
||||
export const wait_script = `
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("getDataSrv" "-relay-") [] -relay-)
|
||||
(call %init_peer_id% ("getDataSrv" "successful") [] successful-iter)
|
||||
)
|
||||
(fold successful-iter successful-item-0
|
||||
(seq
|
||||
(ap successful-item-0 $successful)
|
||||
(next successful-item-0)
|
||||
)
|
||||
)
|
||||
)
|
||||
(call %init_peer_id% ("getDataSrv" "len") [] len)
|
||||
)
|
||||
(call %init_peer_id% ("getDataSrv" "timeout") [] timeout)
|
||||
)
|
||||
(xor
|
||||
(new $status
|
||||
(new $result
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(par
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("math" "sub") [len 1] sub)
|
||||
(new $successful_test
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("math" "add") [sub 1] successful_incr)
|
||||
(fold $successful successful_fold_var
|
||||
(seq
|
||||
(seq
|
||||
(ap successful_fold_var $successful_test)
|
||||
(canon %init_peer_id% $successful_test #successful_iter_canon)
|
||||
)
|
||||
(xor
|
||||
(match #successful_iter_canon.length successful_incr
|
||||
(null)
|
||||
)
|
||||
(next successful_fold_var)
|
||||
)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
)
|
||||
(canon %init_peer_id% $successful_test #successful_result_canon)
|
||||
)
|
||||
(ap #successful_result_canon successful_gate)
|
||||
)
|
||||
)
|
||||
)
|
||||
(call %init_peer_id% ("math" "sub") [len 1] sub-0)
|
||||
)
|
||||
(ap "ok" $status)
|
||||
)
|
||||
(call %init_peer_id% ("peer" "timeout") [timeout "timeout"] $status)
|
||||
)
|
||||
(new $status_test
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("math" "add") [0 1] status_incr)
|
||||
(fold $status status_fold_var
|
||||
(seq
|
||||
(seq
|
||||
(ap status_fold_var $status_test)
|
||||
(canon %init_peer_id% $status_test #status_iter_canon)
|
||||
)
|
||||
(xor
|
||||
(match #status_iter_canon.length status_incr
|
||||
(null)
|
||||
)
|
||||
(next status_fold_var)
|
||||
)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
)
|
||||
(canon %init_peer_id% $status_test #status_result_canon)
|
||||
)
|
||||
(ap #status_result_canon status_gate)
|
||||
)
|
||||
)
|
||||
)
|
||||
(xor
|
||||
(match status_gate.$.[0] "ok"
|
||||
(ap true $result)
|
||||
)
|
||||
(ap false $result)
|
||||
)
|
||||
)
|
||||
(new $result_test
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call %init_peer_id% ("math" "add") [0 1] result_incr)
|
||||
(fold $result result_fold_var
|
||||
(seq
|
||||
(seq
|
||||
(ap result_fold_var $result_test)
|
||||
(canon %init_peer_id% $result_test #result_iter_canon)
|
||||
)
|
||||
(xor
|
||||
(match #result_iter_canon.length result_incr
|
||||
(null)
|
||||
)
|
||||
(next result_fold_var)
|
||||
)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
)
|
||||
(canon %init_peer_id% $result_test #result_result_canon)
|
||||
)
|
||||
(ap #result_result_canon result_gate)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
(call %init_peer_id% ("errorHandlingSrv" "error") [%last_error% 0])
|
||||
)
|
||||
)
|
||||
(call %init_peer_id% ("callbackSrv" "response") [result_gate.$.[0]])
|
||||
)
|
||||
`
|
||||
|
||||
|
||||
export function wait(
|
||||
successful: boolean[],
|
||||
len: number,
|
||||
timeout: number,
|
||||
config?: {ttl?: number}
|
||||
): Promise<boolean>;
|
||||
|
||||
export function wait(
|
||||
peer: IFluenceClient$$,
|
||||
successful: boolean[],
|
||||
len: number,
|
||||
timeout: number,
|
||||
config?: {ttl?: number}
|
||||
): Promise<boolean>;
|
||||
|
||||
export function wait(...args: any) {
|
||||
|
||||
|
||||
return callFunction$$(
|
||||
args,
|
||||
{
|
||||
"functionName" : "wait",
|
||||
"arrow" : {
|
||||
"tag" : "arrow",
|
||||
"domain" : {
|
||||
"tag" : "labeledProduct",
|
||||
"fields" : {
|
||||
"successful" : {
|
||||
"tag" : "array",
|
||||
"type" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "bool"
|
||||
}
|
||||
},
|
||||
"len" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "i16"
|
||||
},
|
||||
"timeout" : {
|
||||
"tag" : "scalar",
|
||||
"name" : "u16"
|
||||
}
|
||||
}
|
||||
},
|
||||
"codomain" : {
|
||||
"tag" : "unlabeledProduct",
|
||||
"items" : [
|
||||
{
|
||||
"tag" : "scalar",
|
||||
"name" : "bool"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"names" : {
|
||||
"relay" : "-relay-",
|
||||
"getDataSrv" : "getDataSrv",
|
||||
"callbackSrv" : "callbackSrv",
|
||||
"responseSrv" : "callbackSrv",
|
||||
"responseFnName" : "response",
|
||||
"errorHandlingSrv" : "errorHandlingSrv",
|
||||
"errorFnName" : "error"
|
||||
}
|
||||
},
|
||||
wait_script
|
||||
)
|
||||
}
|
||||
|
||||
/* eslint-enable */
|
1341
aqua/target/typescript/registry-api.ts
Normal file
1341
aqua/target/typescript/registry-api.ts
Normal file
File diff suppressed because it is too large
Load Diff
322
aqua/target/typescript/registry-scheduled-scripts.ts
Normal file
322
aqua/target/typescript/registry-scheduled-scripts.ts
Normal file
@ -0,0 +1,322 @@
|
||||
/* eslint-disable */
|
||||
// @ts-nocheck
|
||||
/**
|
||||
*
|
||||
* This file is auto-generated. Do not edit manually: changes may be erased.
|
||||
* Generated by Aqua compiler: https://github.com/fluencelabs/aqua/.
|
||||
* If you find any bugs, please write an issue on GitHub: https://github.com/fluencelabs/aqua/issues
|
||||
* Aqua version: 0.11.9-release-please-1c9388a-1275-1
|
||||
*
|
||||
*/
|
||||
import type { IFluenceClient as IFluenceClient$$, CallParams as CallParams$$ } from '@fluencelabs/js-client.api';
|
||||
import {
|
||||
v5_callFunction as callFunction$$,
|
||||
v5_registerService as registerService$$,
|
||||
} from '@fluencelabs/js-client.api';
|
||||
|
||||
|
||||
|
||||
// Services
|
||||
|
||||
// Functions
|
||||
export const replicate_3600_script = `
|
||||
(seq
|
||||
(call %init_peer_id% ("getDataSrv" "-relay-") [] -relay-)
|
||||
(xor
|
||||
(xor
|
||||
(seq
|
||||
(seq
|
||||
(call -relay- ("peer" "timestamp_sec") [] t)
|
||||
(call -relay- ("registry" "evict_stale") [t] res)
|
||||
)
|
||||
(par
|
||||
(fold res.$.results r-0
|
||||
(par
|
||||
(seq
|
||||
(seq
|
||||
(call -relay- ("op" "string_to_b58") [r-0.$.key.id] k)
|
||||
(call -relay- ("kad" "neighborhood") [k [] []] nodes)
|
||||
)
|
||||
(par
|
||||
(fold nodes n-0
|
||||
(par
|
||||
(new $records_weights
|
||||
(xor
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call n-0 ("peer" "timestamp_sec") [] tt)
|
||||
(call n-0 ("trust-graph" "get_weight") [r-0.$.key.owner_peer_id tt] key_weight)
|
||||
)
|
||||
(call n-0 ("registry" "republish_key") [r-0.$.key key_weight tt])
|
||||
)
|
||||
(fold r-0.$.records record-0
|
||||
(seq
|
||||
(call n-0 ("trust-graph" "get_weight") [record-0.$.metadata.issued_by tt] $records_weights)
|
||||
(next record-0)
|
||||
)
|
||||
)
|
||||
)
|
||||
(canon n-0 $records_weights #records_weights_canon)
|
||||
)
|
||||
(call n-0 ("registry" "republish_records") [r-0.$.records #records_weights_canon tt])
|
||||
)
|
||||
(fail %last_error%)
|
||||
)
|
||||
)
|
||||
(next n-0)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
(null)
|
||||
)
|
||||
)
|
||||
(next r-0)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
(null)
|
||||
)
|
||||
)
|
||||
(fail %last_error%)
|
||||
)
|
||||
(call %init_peer_id% ("errorHandlingSrv" "error") [%last_error% 0])
|
||||
)
|
||||
)
|
||||
`
|
||||
|
||||
|
||||
export function replicate_3600(
|
||||
config?: {ttl?: number}
|
||||
): Promise<void>;
|
||||
|
||||
export function replicate_3600(
|
||||
peer: IFluenceClient$$,
|
||||
config?: {ttl?: number}
|
||||
): Promise<void>;
|
||||
|
||||
export function replicate_3600(...args: any) {
|
||||
|
||||
|
||||
return callFunction$$(
|
||||
args,
|
||||
{
|
||||
"functionName" : "replicate_3600",
|
||||
"arrow" : {
|
||||
"tag" : "arrow",
|
||||
"domain" : {
|
||||
"tag" : "labeledProduct",
|
||||
"fields" : {
|
||||
|
||||
}
|
||||
},
|
||||
"codomain" : {
|
||||
"tag" : "nil"
|
||||
}
|
||||
},
|
||||
"names" : {
|
||||
"relay" : "-relay-",
|
||||
"getDataSrv" : "getDataSrv",
|
||||
"callbackSrv" : "callbackSrv",
|
||||
"responseSrv" : "callbackSrv",
|
||||
"responseFnName" : "response",
|
||||
"errorHandlingSrv" : "errorHandlingSrv",
|
||||
"errorFnName" : "error"
|
||||
}
|
||||
},
|
||||
replicate_3600_script
|
||||
)
|
||||
}
|
||||
|
||||
export const clearExpired_86400_script = `
|
||||
(seq
|
||||
(call %init_peer_id% ("getDataSrv" "-relay-") [] -relay-)
|
||||
(xor
|
||||
(xor
|
||||
(seq
|
||||
(call -relay- ("peer" "timestamp_sec") [] t)
|
||||
(call -relay- ("registry" "clear_expired") [t])
|
||||
)
|
||||
(fail %last_error%)
|
||||
)
|
||||
(call %init_peer_id% ("errorHandlingSrv" "error") [%last_error% 0])
|
||||
)
|
||||
)
|
||||
`
|
||||
|
||||
|
||||
export function clearExpired_86400(
|
||||
config?: {ttl?: number}
|
||||
): Promise<void>;
|
||||
|
||||
export function clearExpired_86400(
|
||||
peer: IFluenceClient$$,
|
||||
config?: {ttl?: number}
|
||||
): Promise<void>;
|
||||
|
||||
export function clearExpired_86400(...args: any) {
|
||||
|
||||
|
||||
return callFunction$$(
|
||||
args,
|
||||
{
|
||||
"functionName" : "clearExpired_86400",
|
||||
"arrow" : {
|
||||
"tag" : "arrow",
|
||||
"domain" : {
|
||||
"tag" : "labeledProduct",
|
||||
"fields" : {
|
||||
|
||||
}
|
||||
},
|
||||
"codomain" : {
|
||||
"tag" : "nil"
|
||||
}
|
||||
},
|
||||
"names" : {
|
||||
"relay" : "-relay-",
|
||||
"getDataSrv" : "getDataSrv",
|
||||
"callbackSrv" : "callbackSrv",
|
||||
"responseSrv" : "callbackSrv",
|
||||
"responseFnName" : "response",
|
||||
"errorHandlingSrv" : "errorHandlingSrv",
|
||||
"errorFnName" : "error"
|
||||
}
|
||||
},
|
||||
clearExpired_86400_script
|
||||
)
|
||||
}
|
||||
|
||||
export const renew_43200_script = `
|
||||
(seq
|
||||
(call %init_peer_id% ("getDataSrv" "-relay-") [] -relay-)
|
||||
(xor
|
||||
(xor
|
||||
(seq
|
||||
(seq
|
||||
(call -relay- ("peer" "timestamp_sec") [] t)
|
||||
(call -relay- ("registry" "get_stale_local_records") [t] res)
|
||||
)
|
||||
(par
|
||||
(fold res.$.result r-0
|
||||
(par
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(new $signature
|
||||
(seq
|
||||
(xor
|
||||
(mismatch r-0.$.metadata.peer_id %init_peer_id%
|
||||
(xor
|
||||
(seq
|
||||
(call r-0.$.metadata.peer_id ("registry" "get_record_bytes") [r-0.$.metadata t] bytes)
|
||||
(call r-0.$.metadata.peer_id ("sig" "sign") [bytes] $signature)
|
||||
)
|
||||
(fail %last_error%)
|
||||
)
|
||||
)
|
||||
(xor
|
||||
(seq
|
||||
(call -relay- ("registry" "get_record_bytes") [r-0.$.metadata t] bytess)
|
||||
(xor
|
||||
(call %init_peer_id% ("sig" "sign") [bytess] $signature)
|
||||
(fail %last_error%)
|
||||
)
|
||||
)
|
||||
(fail %last_error%)
|
||||
)
|
||||
)
|
||||
(new $signature_test
|
||||
(seq
|
||||
(seq
|
||||
(seq
|
||||
(call -relay- ("math" "add") [0 1] signature_incr)
|
||||
(fold $signature signature_fold_var
|
||||
(seq
|
||||
(seq
|
||||
(ap signature_fold_var $signature_test)
|
||||
(canon -relay- $signature_test #signature_iter_canon)
|
||||
)
|
||||
(xor
|
||||
(match #signature_iter_canon.length signature_incr
|
||||
(null)
|
||||
)
|
||||
(next signature_fold_var)
|
||||
)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
)
|
||||
(canon -relay- $signature_test #signature_result_canon)
|
||||
)
|
||||
(ap #signature_result_canon signature_gate)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
(call -relay- ("peer" "timestamp_sec") [] t-0)
|
||||
)
|
||||
(call -relay- ("trust-graph" "get_weight") [r-0.$.metadata.issued_by t-0] weight)
|
||||
)
|
||||
(call -relay- ("registry" "put_record") [r-0.$.metadata t signature_gate.$.[0].signature.[0] weight t-0] result)
|
||||
)
|
||||
(next r-0)
|
||||
)
|
||||
(never)
|
||||
)
|
||||
(null)
|
||||
)
|
||||
)
|
||||
(fail %last_error%)
|
||||
)
|
||||
(call %init_peer_id% ("errorHandlingSrv" "error") [%last_error% 0])
|
||||
)
|
||||
)
|
||||
`
|
||||
|
||||
|
||||
export function renew_43200(
|
||||
config?: {ttl?: number}
|
||||
): Promise<void>;
|
||||
|
||||
export function renew_43200(
|
||||
peer: IFluenceClient$$,
|
||||
config?: {ttl?: number}
|
||||
): Promise<void>;
|
||||
|
||||
export function renew_43200(...args: any) {
|
||||
|
||||
|
||||
return callFunction$$(
|
||||
args,
|
||||
{
|
||||
"functionName" : "renew_43200",
|
||||
"arrow" : {
|
||||
"tag" : "arrow",
|
||||
"domain" : {
|
||||
"tag" : "labeledProduct",
|
||||
"fields" : {
|
||||
|
||||
}
|
||||
},
|
||||
"codomain" : {
|
||||
"tag" : "nil"
|
||||
}
|
||||
},
|
||||
"names" : {
|
||||
"relay" : "-relay-",
|
||||
"getDataSrv" : "getDataSrv",
|
||||
"callbackSrv" : "callbackSrv",
|
||||
"responseSrv" : "callbackSrv",
|
||||
"responseFnName" : "response",
|
||||
"errorHandlingSrv" : "errorHandlingSrv",
|
||||
"errorFnName" : "error"
|
||||
}
|
||||
},
|
||||
renew_43200_script
|
||||
)
|
||||
}
|
||||
|
||||
/* eslint-enable */
|
31
build.sh
Executable file
31
build.sh
Executable file
@ -0,0 +1,31 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -o errexit -o nounset -o pipefail
|
||||
set -x
|
||||
|
||||
# set current working directory to script directory to run script from everywhere
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
# Build the service
|
||||
./service/build.sh
|
||||
|
||||
DISTRO_TARGET=distro/registry-service
|
||||
mkdir -p "$DISTRO_TARGET"
|
||||
|
||||
cd ./aqua
|
||||
npm pack
|
||||
cd -
|
||||
|
||||
packed_archive_file_name_pattern="fluencelabs-registry-"
|
||||
packed_archive_file_name=$(find "./aqua" -type f -name "${packed_archive_file_name_pattern}*")
|
||||
|
||||
cd ./aqua-tests
|
||||
echo " '@fluencelabs/registry': file:.$packed_archive_file_name" >> "./fluence.yaml"
|
||||
fluence dep i
|
||||
fluence aqua -i ./spell/spell.aqua --no-relay --air -o "../$DISTRO_TARGET/air"
|
||||
cd -
|
||||
|
||||
cp service/artifacts/registry.wasm service/artifacts/sqlite3.wasm distro/Config.toml "$DISTRO_TARGET"
|
||||
|
||||
cd distro
|
||||
cargo build
|
@ -1,7 +0,0 @@
|
||||
{
|
||||
"name": "registry",
|
||||
"dependencies": [
|
||||
"name:sqlite3",
|
||||
"name:registry"
|
||||
]
|
||||
}
|
@ -1,29 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -o pipefail -o nounset -o errexit
|
||||
|
||||
# set current working directory to script directory to run script from everywhere
|
||||
cd "$(dirname "$0")"
|
||||
SCRIPT_DIR="$(pwd)"
|
||||
SCHEDULED="${SCRIPT_DIR}/scheduled"
|
||||
|
||||
(
|
||||
echo "*** compile scheduled scripts ***"
|
||||
cd ../aqua
|
||||
npx aqua --no-relay --air -i ./registry-scheduled-scripts.aqua -o "$SCHEDULED"
|
||||
)
|
||||
|
||||
(
|
||||
echo "*** copy wasm files ***"
|
||||
cd ../service
|
||||
cp artifacts/*.wasm "$SCRIPT_DIR"
|
||||
)
|
||||
|
||||
(
|
||||
echo "*** create builtin distribution package ***"
|
||||
cd ..
|
||||
mv builtin-package registry
|
||||
tar --exclude="package.sh" -f registry.tar.gz -zcv ./registry
|
||||
mv registry builtin-package
|
||||
)
|
||||
|
||||
echo "*** done ***"
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"name": "registry",
|
||||
"mem_page_count": 1,
|
||||
"preopened_files": [
|
||||
"/tmp"
|
||||
],
|
||||
"mapped_dirs": {
|
||||
"tmp": "./tmp"
|
||||
}
|
||||
}
|
@ -1,4 +0,0 @@
|
||||
{
|
||||
"name": "sqlite3",
|
||||
"mem_pages_count": 100
|
||||
}
|
53
distro/Cargo.lock
generated
Normal file
53
distro/Cargo.lock
generated
Normal file
@ -0,0 +1,53 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
version = 3
|
||||
|
||||
[[package]]
|
||||
name = "built"
|
||||
version = "0.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "38d17f4d6e4dc36d1a02fbedc2753a096848e7c1b0772f7654eab8e2c927dd53"
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "1.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
|
||||
|
||||
[[package]]
|
||||
name = "maplit"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d"
|
||||
|
||||
[[package]]
|
||||
name = "registry-distro"
|
||||
version = "0.9.4"
|
||||
dependencies = [
|
||||
"built",
|
||||
"maplit",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
version = "1.0.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.160"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c"
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.96"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"ryu",
|
||||
"serde",
|
||||
]
|
16
distro/Cargo.toml
Normal file
16
distro/Cargo.toml
Normal file
@ -0,0 +1,16 @@
|
||||
[package]
|
||||
name = "registry-distro"
|
||||
version = "0.9.4"
|
||||
edition = "2021"
|
||||
build = "built.rs"
|
||||
include = [ "/src", "built.rs", "Cargo.toml", "registry-service"]
|
||||
description = "Distribution package for the registry service including scheduled scripts"
|
||||
license = "Apache-2.0"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[dependencies]
|
||||
maplit = "1.0.2"
|
||||
serde_json = "1.0.96"
|
||||
|
||||
[build-dependencies]
|
||||
built = "0.7.1"
|
16
distro/Config.toml
Normal file
16
distro/Config.toml
Normal file
@ -0,0 +1,16 @@
|
||||
modules_dir = "."
|
||||
total_memory_limit = "Infinity"
|
||||
|
||||
[[module]]
|
||||
name = "sqlite3"
|
||||
mem_pages_count = 100
|
||||
logger_enabled = false
|
||||
|
||||
[module.wasi]
|
||||
preopened_files = ["./tmp"]
|
||||
mapped_dirs = { "tmp" = "./tmp" }
|
||||
|
||||
[[module]]
|
||||
name = "registry"
|
||||
mem_pages_count = 1
|
||||
logger_enabled = false
|
3
distro/built.rs
Normal file
3
distro/built.rs
Normal file
@ -0,0 +1,3 @@
|
||||
fn main() {
|
||||
built::write_built_file().expect("Failed to acquire build-time information")
|
||||
}
|
51
distro/src/lib.rs
Normal file
51
distro/src/lib.rs
Normal file
@ -0,0 +1,51 @@
|
||||
use maplit::hashmap;
|
||||
use std::collections::HashMap;
|
||||
use serde_json::{json, Value as JValue};
|
||||
|
||||
pub const REGISTRY_WASM: &'static [u8] = include_bytes!("../registry-service/registry.wasm");
|
||||
pub const SQLITE_WASM: &'static [u8] = include_bytes!("../registry-service/sqlite3.wasm");
|
||||
pub const CONFIG: &'static [u8] = include_bytes!("../registry-service/Config.toml");
|
||||
|
||||
pub const REGISTRY_SPELL: &'static str =
|
||||
include_str!("../registry-service/air/spell.spell.air");
|
||||
|
||||
pub mod build_info {
|
||||
include!(concat!(env!("OUT_DIR"), "/built.rs"));
|
||||
}
|
||||
|
||||
pub use build_info::PKG_VERSION as VERSION;
|
||||
|
||||
pub fn modules() -> std::collections::HashMap<&'static str, &'static [u8]> {
|
||||
maplit::hashmap! {
|
||||
"sqlite3" => SQLITE_WASM,
|
||||
"registry" => REGISTRY_WASM,
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DistrSpell {
|
||||
/// AIR script of the spell
|
||||
pub air: &'static str,
|
||||
/// Initial key-value records for spells KV storage
|
||||
pub init_data: HashMap<&'static str, JValue>,
|
||||
}
|
||||
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct RegistryConfig {
|
||||
pub expired_interval: u32,
|
||||
pub renew_interval: u32,
|
||||
pub replicate_interval: u32
|
||||
}
|
||||
|
||||
pub fn registry_spell(config: RegistryConfig) -> DistrSpell {
|
||||
DistrSpell {
|
||||
air: REGISTRY_SPELL,
|
||||
init_data: hashmap!{
|
||||
"config" => json!( {
|
||||
"expired_interval": config.expired_interval,
|
||||
"renew_interval": config.renew_interval,
|
||||
"replicate_interval": config.replicate_interval,
|
||||
}),
|
||||
},
|
||||
}
|
||||
}
|
7795
example/package-lock.json
generated
7795
example/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@ -1,30 +0,0 @@
|
||||
{
|
||||
"name": "example",
|
||||
"version": "1.0.0",
|
||||
"description": "An example of how to use Registry in TypeScript",
|
||||
"main": "dist/example.js",
|
||||
"scripts": {
|
||||
"compile-aqua": "aqua -i ./src/aqua -o ./src/generated",
|
||||
"build": "npm run compile-aqua && tsc",
|
||||
"prestart": "npm run build",
|
||||
"start": "node dist/example.js"
|
||||
},
|
||||
"keywords": [
|
||||
"aqua",
|
||||
"dht",
|
||||
"p2p"
|
||||
],
|
||||
"author": "Fluence Labs",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@fluencelabs/registry": "../aqua",
|
||||
"@fluencelabs/aqua-lib": "^0.4.3",
|
||||
"@fluencelabs/aqua": "^0.7.0-285",
|
||||
"@fluencelabs/fluence": "0.21.6",
|
||||
"@fluencelabs/fluence-network-environment": "^1.0.13",
|
||||
"@fluencelabs/trust-graph": "^3.0.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"typescript": "^4.4.3"
|
||||
}
|
||||
}
|
@ -1,32 +0,0 @@
|
||||
-- This file demonstrates how to send events to subscribers of a topic
|
||||
-- Detailed explanation can be found in the Aqua Book: https://doc.fluence.dev/aqua-book/libraries/aqua-dht#passing-data-to-subscribers
|
||||
|
||||
import "@fluencelabs/registry/routing.aqua"
|
||||
import "@fluencelabs/registry/registry.aqua"
|
||||
import PeerId from "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
|
||||
-- Application event
|
||||
data Event:
|
||||
value: string
|
||||
|
||||
-- API that every subscriber must adhere to
|
||||
-- You can think of it as an application protocol
|
||||
service EventAPI:
|
||||
receive_event(event: Event)
|
||||
|
||||
func notify_peer(rec: Record, event: Event):
|
||||
-- topological move to peer via relay
|
||||
on rec.peer_id via rec.relay_id:
|
||||
-- resolve service on a peer
|
||||
EventAPI rec.service_id!
|
||||
-- call function
|
||||
EventAPI.receive_event(event)
|
||||
|
||||
-- send event to every peer registered on route
|
||||
func send_everyone(route_id: string, event: Event, ack: i16):
|
||||
on HOST_PEER_ID:
|
||||
-- retrieve all peers registered to the route
|
||||
records <- resolveRoute(route_id, ack)
|
||||
-- iterate through them
|
||||
for rec <- records par:
|
||||
notify_peer(rec, event)
|
@ -1,9 +0,0 @@
|
||||
module Export
|
||||
import createRouteAndRegisterBlocking, resolveRoute from "@fluencelabs/registry/routing.aqua"
|
||||
import Peer from "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
export createRouteAndRegisterBlocking, resolveRoute, timestamp_sec
|
||||
|
||||
|
||||
func timestamp_sec() -> u64:
|
||||
result <- Peer.timestamp_sec()
|
||||
<- result
|
@ -1,51 +0,0 @@
|
||||
import {Fluence, KeyPair} from "@fluencelabs/fluence";
|
||||
import { krasnodar, Node } from "@fluencelabs/fluence-network-environment";
|
||||
import {createRouteAndRegisterBlocking, resolveRoute, timestamp_sec} from "./generated/export";
|
||||
|
||||
let local: Node[] = [
|
||||
{
|
||||
peerId: "12D3KooWHBG9oaVx4i3vi6c1rSBUm7MLBmyGmmbHoZ23pmjDCnvK",
|
||||
multiaddr:
|
||||
"/ip4/127.0.0.1/tcp/9990/ws/p2p/12D3KooWHBG9oaVx4i3vi6c1rSBUm7MLBmyGmmbHoZ23pmjDCnvK",
|
||||
},
|
||||
{
|
||||
peerId: "12D3KooWRABanQHUn28dxavN9ZS1zZghqoZVAYtFpoN7FdtoGTFv",
|
||||
multiaddr:
|
||||
"/ip4/127.0.0.1/tcp/9991/ws/p2p/12D3KooWRABanQHUn28dxavN9ZS1zZghqoZVAYtFpoN7FdtoGTFv",
|
||||
},
|
||||
{
|
||||
peerId: "12D3KooWFpQ7LHxcC9FEBUh3k4nSCC12jBhijJv3gJbi7wsNYzJ5",
|
||||
multiaddr:
|
||||
"/ip4/127.0.0.1/tcp/9992/ws/p2p/12D3KooWFpQ7LHxcC9FEBUh3k4nSCC12jBhijJv3gJbi7wsNYzJ5",
|
||||
},
|
||||
];
|
||||
|
||||
async function main() {
|
||||
// connect to the Fluence network
|
||||
await Fluence.start({ connectTo: krasnodar[0] });
|
||||
console.log("%s", await timestamp_sec());
|
||||
console.log(
|
||||
"📗 created a fluence peer %s with relay %s",
|
||||
Fluence.getStatus().peerId,
|
||||
Fluence.getStatus().relayPeerId
|
||||
);
|
||||
let label = "myLabel";
|
||||
let value = "myValue";
|
||||
console.log("Will create route with label:", label);
|
||||
// create route (if not exists) and register on it
|
||||
let route_id = await createRouteAndRegisterBlocking(
|
||||
label, value, null,
|
||||
(s) => console.log(`node ${s} saved the record`),
|
||||
5
|
||||
);
|
||||
// find other peers on this route
|
||||
console.log("let's resolve route for %s", route_id);
|
||||
let providers = await resolveRoute(route_id, 5);
|
||||
console.log("route providers:", providers);
|
||||
}
|
||||
|
||||
main().then(() => process.exit(0))
|
||||
.catch(error => {
|
||||
console.error(error);
|
||||
process.exit(1);
|
||||
});
|
@ -1,69 +0,0 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
/* Visit https://aka.ms/tsconfig.json to read more about this file */
|
||||
|
||||
/* Basic Options */
|
||||
// "incremental": true, /* Enable incremental compilation */
|
||||
"target": "es5", /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', or 'ESNEXT'. */
|
||||
"module": "commonjs", /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */
|
||||
// "lib": [], /* Specify library files to be included in the compilation. */
|
||||
// "allowJs": true, /* Allow javascript files to be compiled. */
|
||||
// "checkJs": true, /* Report errors in .js files. */
|
||||
// "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */
|
||||
// "declaration": true, /* Generates corresponding '.d.ts' file. */
|
||||
// "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */
|
||||
// "sourceMap": true, /* Generates corresponding '.map' file. */
|
||||
// "outFile": "./", /* Concatenate and emit output to single file. */
|
||||
"outDir": "./dist", /* Redirect output structure to the directory. */
|
||||
// "rootDir": "./", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */
|
||||
// "composite": true, /* Enable project compilation */
|
||||
// "tsBuildInfoFile": "./", /* Specify file to store incremental compilation information */
|
||||
// "removeComments": true, /* Do not emit comments to output. */
|
||||
// "noEmit": true, /* Do not emit outputs. */
|
||||
// "importHelpers": true, /* Import emit helpers from 'tslib'. */
|
||||
// "downlevelIteration": true, /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */
|
||||
// "isolatedModules": true, /* Transpile each file as a separate module (similar to 'ts.transpileModule'). */
|
||||
|
||||
/* Strict Type-Checking Options */
|
||||
"strict": true, /* Enable all strict type-checking options. */
|
||||
// "noImplicitAny": true, /* Raise error on expressions and declarations with an implied 'any' type. */
|
||||
// "strictNullChecks": true, /* Enable strict null checks. */
|
||||
// "strictFunctionTypes": true, /* Enable strict checking of function types. */
|
||||
// "strictBindCallApply": true, /* Enable strict 'bind', 'call', and 'apply' methods on functions. */
|
||||
// "strictPropertyInitialization": true, /* Enable strict checking of property initialization in classes. */
|
||||
// "noImplicitThis": true, /* Raise error on 'this' expressions with an implied 'any' type. */
|
||||
// "alwaysStrict": true, /* Parse in strict mode and emit "use strict" for each source file. */
|
||||
|
||||
/* Additional Checks */
|
||||
// "noUnusedLocals": true, /* Report errors on unused locals. */
|
||||
// "noUnusedParameters": true, /* Report errors on unused parameters. */
|
||||
// "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */
|
||||
// "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */
|
||||
|
||||
/* Module Resolution Options */
|
||||
// "moduleResolution": "node", /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */
|
||||
// "baseUrl": "./", /* Base directory to resolve non-absolute module names. */
|
||||
// "paths": {}, /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */
|
||||
// "rootDirs": [], /* List of root folders whose combined content represents the structure of the project at runtime. */
|
||||
// "typeRoots": [], /* List of folders to include type definitions from. */
|
||||
// "types": [], /* Type declaration files to be included in compilation. */
|
||||
// "allowSyntheticDefaultImports": true, /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */
|
||||
"esModuleInterop": true, /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */
|
||||
// "preserveSymlinks": true, /* Do not resolve the real path of symlinks. */
|
||||
// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
|
||||
|
||||
/* Source Map Options */
|
||||
// "sourceRoot": "", /* Specify the location where debugger should locate TypeScript files instead of source locations. */
|
||||
// "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
|
||||
// "inlineSourceMap": true, /* Emit a single file with source maps instead of having a separate file. */
|
||||
// "inlineSources": true, /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */
|
||||
|
||||
/* Experimental Options */
|
||||
// "experimentalDecorators": true, /* Enables experimental support for ES7 decorators. */
|
||||
// "emitDecoratorMetadata": true, /* Enables experimental support for emitting type metadata for decorators. */
|
||||
|
||||
/* Advanced Options */
|
||||
"skipLibCheck": true, /* Skip type checking of declaration files. */
|
||||
"forceConsistentCasingInFileNames": true /* Disallow inconsistently-cased references to the same file. */
|
||||
}
|
||||
}
|
24
examples/archived/1-registry/.fluence/aqua/deals.aqua
Normal file
24
examples/archived/1-registry/.fluence/aqua/deals.aqua
Normal file
@ -0,0 +1,24 @@
|
||||
aqua Deals declares *
|
||||
|
||||
data Deal:
|
||||
definition: string
|
||||
timestamp: string
|
||||
dealIdOriginal: string
|
||||
dealId: string
|
||||
chainNetwork: string
|
||||
chainNetworkId: u64
|
||||
|
||||
data Deals:
|
||||
dealName: ?Deal
|
||||
|
||||
func get() -> Deals:
|
||||
<- Deals(
|
||||
dealName=?[Deal(
|
||||
definition="bafkreidqtqpmmferdscg4bqrs74cl6ckib3vyhvejhrc4watln5xxcrj2i",
|
||||
timestamp="2023-12-19T20:01:24.334Z",
|
||||
dealIdOriginal="0xEb92A1B5c10AD7BFdcaf23Cb7DDA9ea062CD07E8",
|
||||
dealId="eb92a1b5c10ad7bfdcaf23cb7dda9ea062cd07e8",
|
||||
chainNetwork="local",
|
||||
chainNetworkId=31337
|
||||
)]
|
||||
)
|
4
examples/archived/1-registry/.fluence/aqua/hosts.aqua
Normal file
4
examples/archived/1-registry/.fluence/aqua/hosts.aqua
Normal file
@ -0,0 +1,4 @@
|
||||
aqua Hosts declares *
|
||||
|
||||
func get() -> ?u8:
|
||||
<- nil
|
2
examples/archived/1-registry/.fluence/aqua/services.aqua
Normal file
2
examples/archived/1-registry/.fluence/aqua/services.aqua
Normal file
@ -0,0 +1,2 @@
|
||||
service EchoService("echo_service"):
|
||||
echo(msg: string) -> string
|
@ -0,0 +1,20 @@
|
||||
aquavm_pool_size = 2
|
||||
tcp_port = 7_771
|
||||
websocket_port = 9_991
|
||||
http_port = 18_080
|
||||
|
||||
[system_services]
|
||||
enable = [ "registry", "decider" ]
|
||||
|
||||
[system_services.aqua_ipfs]
|
||||
external_api_multiaddr = "/ip4/127.0.0.1/tcp/5001"
|
||||
local_api_multiaddr = "/dns4/ipfs/tcp/5001"
|
||||
|
||||
[system_services.decider]
|
||||
decider_period_sec = 10
|
||||
worker_ipfs_multiaddr = "/dns4/ipfs/tcp/5001"
|
||||
network_api_endpoint = "http://chain:8545"
|
||||
network_id = 31_337
|
||||
start_block = "earliest"
|
||||
matcher_address = "0x0e1F3B362E22B2Dc82C9E35d6e62998C7E8e2349"
|
||||
wallet_key = "0x3cc23e0227bd17ea5d6ea9d42b5eaa53ad41b1974de4755c79fe236d361a6fd5"
|
@ -0,0 +1,20 @@
|
||||
aquavm_pool_size = 2
|
||||
tcp_port = 7_772
|
||||
websocket_port = 9_992
|
||||
http_port = 18_081
|
||||
|
||||
[system_services]
|
||||
enable = [ "registry", "decider" ]
|
||||
|
||||
[system_services.aqua_ipfs]
|
||||
external_api_multiaddr = "/ip4/127.0.0.1/tcp/5001"
|
||||
local_api_multiaddr = "/dns4/ipfs/tcp/5001"
|
||||
|
||||
[system_services.decider]
|
||||
decider_period_sec = 10
|
||||
worker_ipfs_multiaddr = "/dns4/ipfs/tcp/5001"
|
||||
network_api_endpoint = "http://chain:8545"
|
||||
network_id = 31_337
|
||||
start_block = "earliest"
|
||||
matcher_address = "0x0e1F3B362E22B2Dc82C9E35d6e62998C7E8e2349"
|
||||
wallet_key = "0x089162470bcfc93192b95bff0a1860d063266875c782af9d882fcca125323b41"
|
@ -0,0 +1,20 @@
|
||||
aquavm_pool_size = 2
|
||||
tcp_port = 7_773
|
||||
websocket_port = 9_993
|
||||
http_port = 18_082
|
||||
|
||||
[system_services]
|
||||
enable = [ "registry", "decider" ]
|
||||
|
||||
[system_services.aqua_ipfs]
|
||||
external_api_multiaddr = "/ip4/127.0.0.1/tcp/5001"
|
||||
local_api_multiaddr = "/dns4/ipfs/tcp/5001"
|
||||
|
||||
[system_services.decider]
|
||||
decider_period_sec = 10
|
||||
worker_ipfs_multiaddr = "/dns4/ipfs/tcp/5001"
|
||||
network_api_endpoint = "http://chain:8545"
|
||||
network_id = 31_337
|
||||
start_block = "earliest"
|
||||
matcher_address = "0x0e1F3B362E22B2Dc82C9E35d6e62998C7E8e2349"
|
||||
wallet_key = "0xdacd4b197ee7e9efdd5db1921c6c558d88e2c8b69902b8bafc812fb226a6b5e0"
|
102
examples/archived/1-registry/.fluence/docker-compose.yaml
Normal file
102
examples/archived/1-registry/.fluence/docker-compose.yaml
Normal file
@ -0,0 +1,102 @@
|
||||
# yaml-language-server: $schema=schemas/docker-compose.json
|
||||
|
||||
# Defines a multi-containers based application.
|
||||
|
||||
# Documentation: https://github.com/fluencelabs/cli/tree/main/docs/configs/docker-compose.md
|
||||
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
chain:
|
||||
image: fluencelabs/chain-rpc:0.2.20
|
||||
ports:
|
||||
- 8545:8545
|
||||
ipfs:
|
||||
image: ipfs/go-ipfs
|
||||
ports:
|
||||
- 5001:5001
|
||||
- 4001:4001
|
||||
environment:
|
||||
IPFS_PROFILE: server
|
||||
volumes:
|
||||
- ./ipfs/:/container-init.d/
|
||||
nox-0:
|
||||
image: fluencelabs/nox:0.16.3
|
||||
pull_policy: always
|
||||
ports:
|
||||
- 7771:7771
|
||||
- 9991:9991
|
||||
environment:
|
||||
WASM_LOG: info
|
||||
RUST_LOG: debug,particle_reap=debug,aquamarine=warn,aquamarine::particle_functions=debug,aquamarine::log=debug,aquamarine::aqua_runtime=error,ipfs_effector=off,ipfs_pure=off,system_services=debug,marine_core::module::marine_module=info,tokio_threadpool=info,tokio_reactor=info,mio=info,tokio_io=info,soketto=info,yamux=info,multistream_select=info,libp2p_secio=info,libp2p_websocket::framed=info,libp2p_ping=info,libp2p_core::upgrade::apply=info,libp2p_kad::kbucket=info,cranelift_codegen=info,wasmer_wasi=info,cranelift_codegen=info,wasmer_wasi=info,run-console=trace,wasmtime_cranelift=off,wasmtime_jit=off,libp2p_tcp=off,libp2p_swarm=off,particle_protocol::libp2p_protocol::upgrade=info,libp2p_mplex=off,particle_reap=off,netlink_proto=warn
|
||||
FLUENCE_MAX_SPELL_PARTICLE_TTL: 9s
|
||||
FLUENCE_ROOT_KEY_PAIR__PATH: /run/secrets/nox-0
|
||||
command:
|
||||
- --config=/run/configs/nox-0_Config.toml
|
||||
- --external-maddrs
|
||||
- /dns4/nox-0/tcp/7771
|
||||
- /dns4/nox-0/tcp/9991/ws
|
||||
- --allow-private-ips
|
||||
- --local
|
||||
depends_on:
|
||||
- ipfs
|
||||
volumes:
|
||||
- ./configs/nox-0_Config.toml:/run/configs/nox-0_Config.toml
|
||||
secrets:
|
||||
- nox-0
|
||||
nox-1:
|
||||
image: fluencelabs/nox:0.16.3
|
||||
pull_policy: always
|
||||
ports:
|
||||
- 7772:7772
|
||||
- 9992:9992
|
||||
environment:
|
||||
WASM_LOG: info
|
||||
RUST_LOG: debug,particle_reap=debug,aquamarine=warn,aquamarine::particle_functions=debug,aquamarine::log=debug,aquamarine::aqua_runtime=error,ipfs_effector=off,ipfs_pure=off,system_services=debug,marine_core::module::marine_module=info,tokio_threadpool=info,tokio_reactor=info,mio=info,tokio_io=info,soketto=info,yamux=info,multistream_select=info,libp2p_secio=info,libp2p_websocket::framed=info,libp2p_ping=info,libp2p_core::upgrade::apply=info,libp2p_kad::kbucket=info,cranelift_codegen=info,wasmer_wasi=info,cranelift_codegen=info,wasmer_wasi=info,run-console=trace,wasmtime_cranelift=off,wasmtime_jit=off,libp2p_tcp=off,libp2p_swarm=off,particle_protocol::libp2p_protocol::upgrade=info,libp2p_mplex=off,particle_reap=off,netlink_proto=warn
|
||||
FLUENCE_MAX_SPELL_PARTICLE_TTL: 9s
|
||||
FLUENCE_ROOT_KEY_PAIR__PATH: /run/secrets/nox-1
|
||||
command:
|
||||
- --config=/run/configs/nox-1_Config.toml
|
||||
- --external-maddrs
|
||||
- /dns4/nox-1/tcp/7772
|
||||
- /dns4/nox-1/tcp/9992/ws
|
||||
- --allow-private-ips
|
||||
- --bootstraps=/dns/nox-0/tcp/7771
|
||||
depends_on:
|
||||
- ipfs
|
||||
volumes:
|
||||
- ./configs/nox-1_Config.toml:/run/configs/nox-1_Config.toml
|
||||
secrets:
|
||||
- nox-1
|
||||
nox-2:
|
||||
image: fluencelabs/nox:0.16.3
|
||||
pull_policy: always
|
||||
ports:
|
||||
- 7773:7773
|
||||
- 9993:9993
|
||||
environment:
|
||||
WASM_LOG: info
|
||||
RUST_LOG: debug,particle_reap=debug,aquamarine=warn,aquamarine::particle_functions=debug,aquamarine::log=debug,aquamarine::aqua_runtime=error,ipfs_effector=off,ipfs_pure=off,system_services=debug,marine_core::module::marine_module=info,tokio_threadpool=info,tokio_reactor=info,mio=info,tokio_io=info,soketto=info,yamux=info,multistream_select=info,libp2p_secio=info,libp2p_websocket::framed=info,libp2p_ping=info,libp2p_core::upgrade::apply=info,libp2p_kad::kbucket=info,cranelift_codegen=info,wasmer_wasi=info,cranelift_codegen=info,wasmer_wasi=info,run-console=trace,wasmtime_cranelift=off,wasmtime_jit=off,libp2p_tcp=off,libp2p_swarm=off,particle_protocol::libp2p_protocol::upgrade=info,libp2p_mplex=off,particle_reap=off,netlink_proto=warn
|
||||
FLUENCE_MAX_SPELL_PARTICLE_TTL: 9s
|
||||
FLUENCE_ROOT_KEY_PAIR__PATH: /run/secrets/nox-2
|
||||
command:
|
||||
- --config=/run/configs/nox-2_Config.toml
|
||||
- --external-maddrs
|
||||
- /dns4/nox-2/tcp/7773
|
||||
- /dns4/nox-2/tcp/9993/ws
|
||||
- --allow-private-ips
|
||||
- --bootstraps=/dns/nox-0/tcp/7771
|
||||
depends_on:
|
||||
- ipfs
|
||||
volumes:
|
||||
- ./configs/nox-2_Config.toml:/run/configs/nox-2_Config.toml
|
||||
secrets:
|
||||
- nox-2
|
||||
|
||||
secrets:
|
||||
nox-0:
|
||||
file: secrets/nox-0.txt
|
||||
nox-1:
|
||||
file: secrets/nox-1.txt
|
||||
nox-2:
|
||||
file: secrets/nox-2.txt
|
17
examples/archived/1-registry/.fluence/workers.yaml
Normal file
17
examples/archived/1-registry/.fluence/workers.yaml
Normal file
@ -0,0 +1,17 @@
|
||||
# yaml-language-server: $schema=schemas/workers.json
|
||||
|
||||
# A result of app deployment. This file is created automatically after successful deployment using `fluence workers deploy` command
|
||||
|
||||
# Documentation: https://github.com/fluencelabs/cli/tree/main/docs/configs/workers.md
|
||||
|
||||
version: 1
|
||||
|
||||
deals:
|
||||
local:
|
||||
dealName:
|
||||
definition: bafkreidqtqpmmferdscg4bqrs74cl6ckib3vyhvejhrc4watln5xxcrj2i
|
||||
timestamp: 2023-12-19T20:01:24.334Z
|
||||
dealIdOriginal: "0xEb92A1B5c10AD7BFdcaf23Cb7DDA9ea062CD07E8"
|
||||
dealId: eb92a1b5c10ad7bfdcaf23cb7dda9ea062cd07e8
|
||||
chainNetwork: local
|
||||
chainNetworkId: 31337
|
12
examples/archived/1-registry/.gitignore
vendored
Normal file
12
examples/archived/1-registry/.gitignore
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
.idea
|
||||
.DS_Store
|
||||
/.fluence/secrets
|
||||
/.fluence/env.yaml
|
||||
/.fluence/schemas
|
||||
/.fluence/tmp
|
||||
**/node_modules
|
||||
**/target/
|
||||
.repl_history
|
||||
/.vscode/settings.json
|
||||
/src/ts/src/aqua
|
||||
/src/js/src/aqua
|
6
examples/archived/1-registry/.vscode/extensions.json
vendored
Normal file
6
examples/archived/1-registry/.vscode/extensions.json
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
{
|
||||
"recommendations": [
|
||||
"redhat.vscode-yaml",
|
||||
"FluenceLabs.aqua"
|
||||
]
|
||||
}
|
2847
examples/archived/1-registry/Cargo.lock
generated
Normal file
2847
examples/archived/1-registry/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
2
examples/archived/1-registry/Cargo.toml
Normal file
2
examples/archived/1-registry/Cargo.toml
Normal file
@ -0,0 +1,2 @@
|
||||
[workspace]
|
||||
members = [ "src/services/echo_service/modules/echo_service" ]
|
164
examples/archived/1-registry/README.md
Normal file
164
examples/archived/1-registry/README.md
Normal file
@ -0,0 +1,164 @@
|
||||
# Services advertisement and discovery
|
||||
|
||||
## Overview
|
||||
|
||||
This example shows how to use Registry to discover and call fluence services without having their exact peer and service ids.
|
||||
|
||||
## Table of contents:
|
||||
|
||||
- [Services advertisement and discovery](#services-advertisement-and-discovery)
|
||||
- [Overview](#overview)
|
||||
- [Table of contents:](#table-of-contents)
|
||||
- [Set up the environment](#set-up-the-environment)
|
||||
- [Deploy echo service written in Rust](#deploy-echo-service-written-in-rust)
|
||||
- [Run echo service written in JS/TS](#run-echo-service-written-in-jsts)
|
||||
- [Register both services using Registry](#register-both-services-using-registry)
|
||||
- [Call both services using resourceId](#call-both-services-using-resourceid)
|
||||
- [Remove service record](#remove-service-record)
|
||||
|
||||
## Set up the environment
|
||||
|
||||
1. [Install the latest version of Fluence CLI](https://github.com/fluencelabs/cli#installation-and-usage)
|
||||
2. Install Fluence project dependencies. It may take a while:
|
||||
```sh
|
||||
fluence dep i
|
||||
```
|
||||
3. Install JS dependencies:
|
||||
```sh
|
||||
npm i
|
||||
```
|
||||
You can also use VSCode with [Aqua extension](https://marketplace.visualstudio.com/items?itemName=FluenceLabs.aqua) for [Aqua language](https://fluence.dev/docs/aqua-book/getting-started/) syntax highlighting and better developer experience.
|
||||
|
||||
## Deploy echo service written in Rust
|
||||
|
||||
To deploy the Fluence application execute
|
||||
```sh
|
||||
fluence deploy
|
||||
```
|
||||
Press Enter when prompted `? Do you want to deploy all of these services? (Y/n)`
|
||||
|
||||
This Fluence application, described in [fluence.yaml](fluence.yaml), consists of just one [echo service](./echo_service) which has only one [module](./echo_service/modules/echo_service/) written in Rust. [The module code](echo_service/modules/echo_service/src/main.rs) has only one function [echo](echo_service/modules/echo_service/src/main.rs#L9), which returns your `msg` along with peerId of the host:
|
||||
|
||||
To call [echo](src/aqua/main.aqua#L8) aqua function execute:
|
||||
```sh
|
||||
fluence run -f 'echo("hi")'
|
||||
```
|
||||
The function uses `peerId` and `serviceId`, which Fluence CLI stored in `./.fluence/app.yaml` when you deployed the Fluence application in the previous step.
|
||||
|
||||
You should see output similar to this:
|
||||
```
|
||||
"12D3KooWFEwNWcHqi9rtsmDhsYcDbRUCDXH84RC4FW6UfsFWaoHi: hi"
|
||||
```
|
||||
|
||||
It means we successfully deployed our echo service, and anyone can call it if they have `peerId` and `serviceId`
|
||||
|
||||
## Run echo service written in JS/TS
|
||||
|
||||
Execute
|
||||
```sh
|
||||
npm run start
|
||||
```
|
||||
|
||||
First, aqua code in [src/aqua/export.aqua](src/aqua/export.aqua) will be compiled to typescript and you will see it in [src/generated/export.ts](src/generated/export.ts).
|
||||
|
||||
Then you possibly will have to confirm ts-node installation and [src/echo.ts](src/echo.ts) will be executed. It registers local js service with serviceId "echo", so anyone who has `relayId`, `peerId` and `serviceId` ("echo") will be able to call it. Copy the command from the terminal, which will look similar to this:
|
||||
```sh
|
||||
fluence run -f 'echoJS("12D3KooWCmnhnGvKTqEXpVLzdrYu3TkQ3HcLyArGJpLPooJQ69dN", "12D3KooWSD5PToNiLQwKDXsu8JSysCwUt8BVUJEqCHcDe7P5h45e", "echo", "hi")'
|
||||
```
|
||||
This command executes [echoJS](src/aqua/main.aqua#L16) aqua function with arguments: relayId, peerId, serviceId and msg
|
||||
|
||||
Open another terminal in the same directory, paste the command and run it.
|
||||
|
||||
You should see output similar to this:
|
||||
```
|
||||
"12D3KooWCmnhnGvKTqEXpVLzdrYu3TkQ3HcLyArGJpLPooJQ69dN: hi"
|
||||
```
|
||||
|
||||
It means anyone can call our `echo` service, written in TS/JS, if they have `relayId`, `peerId` and `serviceId`.
|
||||
## Register both services using Registry
|
||||
|
||||
We can register our services in Registry if we want anyone to be able to call our services without specifying the exact relay, peer, and service IDs.
|
||||
|
||||
First, we need to create the Resource. The Resource represents a group of services and has a corresponding `resourceId` which we can use for service discovery.
|
||||
|
||||
To call [createRes](src/aqua/main.aqua#L22) aqua function, execute
|
||||
```sh
|
||||
fluence run -f 'createRes()'
|
||||
```
|
||||
It uses `createResource` function from Resources API to register the Resource with the label `echo`.
|
||||
You should see output similar to this:
|
||||
|
||||
```
|
||||
5pYpWB3ozi6fi1EjNs9X5kE156aA6iLECxTuVdJgUaLB
|
||||
```
|
||||
|
||||
It is `resourceId`, which we will use to register our services, and then we will be able to use the same `resourceId` to discover and call our services
|
||||
|
||||
To register the `echo` service written in Rust, replace `RESOURCE_ID` and execute
|
||||
```sh
|
||||
fluence run -f 'registerEchoService("RESOURCE_ID")'
|
||||
```
|
||||
This command calls [registerEchoService](src/aqua/main.aqua#L26) aqua function, which uses `registerService` function from Resources API to register the rust service on this `resourceId`
|
||||
|
||||
You should see this output:
|
||||
```
|
||||
[
|
||||
true,
|
||||
[]
|
||||
]
|
||||
```
|
||||
It means the service is registered in Registry and should be accessible by anyone who only has the `resourceId` of this service.
|
||||
|
||||
Then please stop fluence js peer in the previous terminal that you ran.
|
||||
|
||||
To register echo service written in JS/TS on the Resource, replace `RESOURCE_ID` and execute
|
||||
```sh
|
||||
npm run start -- 'RESOURCE_ID'
|
||||
```
|
||||
## Call both services using resourceId
|
||||
Go to a different terminal in the same directory, replace `RESOURCE_ID` and execute this command to call [echoAll](src/aqua/main.aqua#L33) aqua function
|
||||
```sh
|
||||
fluence run -f 'echoAll("RESOURCE_ID", "hi")'
|
||||
```
|
||||
It uses `resourceId` to resolve a minimum of two records with peer and service ids and then uses them to call our services
|
||||
|
||||
You should see output similar to this:
|
||||
```
|
||||
[
|
||||
[
|
||||
"12D3KooWFEwNWcHqi9rtsmDhsYcDbRUCDXH84RC4FW6UfsFWaoHi: hi",
|
||||
"12D3KooWCmnhnGvKTqEXpVLzdrYu3TkQ3HcLyArGJpLPooJQ69dN: hi"
|
||||
]
|
||||
]
|
||||
```
|
||||
It means we successfully registered our services using Registry, and now anyone can call these services using only `resourceId`.
|
||||
|
||||
## Remove service record
|
||||
Replace `RESOURCE_ID` and execute
|
||||
```sh
|
||||
fluence run -f 'unregisterEchoService("RESOURCE_ID")'
|
||||
```
|
||||
to call [unregisterEchoService](src/aqua/main.aqua#L43) function that uses `unregisterService` function from Resources API to unregister only our echo services written in Rust
|
||||
|
||||
The output should look like this:
|
||||
```
|
||||
[
|
||||
[
|
||||
true
|
||||
]
|
||||
]
|
||||
```
|
||||
Let's make sure we've removed the service record. Once again, replace `RESOURCE_ID` and execute
|
||||
```sh
|
||||
fluence run -f 'echoAll("RESOURCE_ID", "hi")'
|
||||
```
|
||||
|
||||
You should see output similar to this:
|
||||
```
|
||||
[
|
||||
[
|
||||
"12D3KooWCmnhnGvKTqEXpVLzdrYu3TkQ3HcLyArGJpLPooJQ69dN: hi"
|
||||
]
|
||||
]
|
||||
```
|
||||
You can notice that only one result is left instead of two. It means we successfully removed the service record from our Resource
|
24
examples/archived/1-registry/fluence.yaml
Normal file
24
examples/archived/1-registry/fluence.yaml
Normal file
@ -0,0 +1,24 @@
|
||||
# yaml-language-server: $schema=.fluence/schemas/fluence.json
|
||||
|
||||
# Defines Fluence Project, most importantly - what exactly you want to deploy and how. You can use `fluence init` command to generate a template for new Fluence project
|
||||
|
||||
# Documentation: https://github.com/fluencelabs/cli/tree/main/docs/configs/fluence.md
|
||||
|
||||
version: 5
|
||||
|
||||
aquaInputPath: src/aqua/main.aqua
|
||||
|
||||
deals:
|
||||
dealName:
|
||||
minWorkers: 1
|
||||
targetWorkers: 3
|
||||
services: [ echo_service ]
|
||||
spells: []
|
||||
|
||||
services:
|
||||
echo_service:
|
||||
get: src/services/echo_service
|
||||
|
||||
relaysPath: src/frontend/src
|
||||
|
||||
aquaOutputTSPath: src/frontend/src/compiled-aqua
|
32
examples/archived/1-registry/provider.yaml
Normal file
32
examples/archived/1-registry/provider.yaml
Normal file
@ -0,0 +1,32 @@
|
||||
# yaml-language-server: $schema=.fluence/schemas/provider.json
|
||||
|
||||
# Defines config used for provider set up
|
||||
|
||||
# Documentation: https://github.com/fluencelabs/cli/tree/main/docs/configs/provider.md
|
||||
|
||||
version: 0
|
||||
|
||||
env: local
|
||||
|
||||
nox:
|
||||
systemServices:
|
||||
enable:
|
||||
- registry
|
||||
- decider
|
||||
|
||||
computePeers:
|
||||
nox-0:
|
||||
computeUnits: 1
|
||||
nox-1:
|
||||
computeUnits: 1
|
||||
nox-2:
|
||||
computeUnits: 1
|
||||
|
||||
offers:
|
||||
offer-0:
|
||||
maxCollateralPerWorker: 1
|
||||
minPricePerWorkerEpoch: 0.1
|
||||
computePeers:
|
||||
- nox-0
|
||||
- nox-1
|
||||
- nox-2
|
73
examples/archived/1-registry/src/aqua/main.aqua
Normal file
73
examples/archived/1-registry/src/aqua/main.aqua
Normal file
@ -0,0 +1,73 @@
|
||||
import "@fluencelabs/aqua-lib/builtin.aqua"
|
||||
import "@fluencelabs/aqua-lib/subnet.aqua"
|
||||
import createResource, registerService, resolveResource from "@fluencelabs/registry/resources-api.aqua"
|
||||
|
||||
use "deals.aqua"
|
||||
use "hosts.aqua"
|
||||
import "services.aqua"
|
||||
|
||||
service EchoJSService:
|
||||
echo(msg: string) -> string
|
||||
|
||||
func echo(msg: string) -> string:
|
||||
deals <- Deals.get()
|
||||
dealId = deals.dealName!.dealIdOriginal
|
||||
|
||||
on HOST_PEER_ID:
|
||||
subnet <- Subnet.resolve(dealId)
|
||||
|
||||
if subnet.success == false:
|
||||
Console.print(["Failed to resolve subnet: ", subnet.error])
|
||||
|
||||
w = subnet.workers!
|
||||
|
||||
on w.worker_id! via w.host_id:
|
||||
res <- EchoService.echo(msg)
|
||||
<- res
|
||||
|
||||
|
||||
func echoJS(peerId: string, relayId: string, serviceId: string, msg: string) -> string:
|
||||
on peerId via relayId:
|
||||
EchoService serviceId
|
||||
res <- EchoService.echo(msg)
|
||||
<- res
|
||||
|
||||
func createRes(label: string) -> ?string:
|
||||
resourceId, error <- createResource(label)
|
||||
<- error
|
||||
|
||||
func registerResourceService(resourceId: string, serviceId: string) -> bool, *string:
|
||||
on HOST_PEER_ID:
|
||||
-- TODO: change hardcoded local peer to resolve
|
||||
res, message <- registerService(resourceId, "" , "12D3KooWBM3SdXWqGaawQDGQ6JprtwswEg3FWGvGhmgmMez1vRbR", [serviceId])
|
||||
<- res, message
|
||||
|
||||
|
||||
func echoAll(resourceId: string, msg: string) -> *string:
|
||||
-- 2 is the min number of peers we want to ask
|
||||
records <- resolveResource(resourceId, 2)
|
||||
results: *string
|
||||
for r <- records!:
|
||||
on HOST_PEER_ID:
|
||||
EchoService r.metadata.service_id!
|
||||
results <- EchoService.echo(msg)
|
||||
<- results
|
||||
|
||||
func showSubnets() -> *string:
|
||||
deals <- Deals.get()
|
||||
dealId = deals.dealName!.dealIdOriginal
|
||||
|
||||
on HOST_PEER_ID:
|
||||
results: *string
|
||||
subnet <- Subnet.resolve(dealId)
|
||||
|
||||
if subnet.success == false:
|
||||
Console.print(["Failed to resolve subnet: ", subnet.error])
|
||||
|
||||
for w <- subnet.workers:
|
||||
results <<- w.host_id
|
||||
|
||||
<- results
|
||||
|
||||
|
||||
|
3547
examples/archived/1-registry/src/frontend/package-lock.json
generated
Normal file
3547
examples/archived/1-registry/src/frontend/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
24
examples/archived/1-registry/src/frontend/package.json
Normal file
24
examples/archived/1-registry/src/frontend/package.json
Normal file
@ -0,0 +1,24 @@
|
||||
{
|
||||
"type": "module",
|
||||
"private": true,
|
||||
"name": "echo",
|
||||
"version": "0.0.0",
|
||||
"description": "Fluence Peer with echo service",
|
||||
"scripts": {
|
||||
"start": "node --loader ts-node/esm src/echo.ts"
|
||||
},
|
||||
"keywords": [
|
||||
"aqua",
|
||||
"dht",
|
||||
"p2p"
|
||||
],
|
||||
"author": "Fluence Labs",
|
||||
"dependencies": {
|
||||
"@fluencelabs/js-client": "0.5.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@fluencelabs/registry": "^0.9.2",
|
||||
"ts-node": "10.9.2",
|
||||
"typescript": "5.0.2"
|
||||
}
|
||||
}
|
83
examples/archived/1-registry/src/frontend/src/echo.ts
Normal file
83
examples/archived/1-registry/src/frontend/src/echo.ts
Normal file
@ -0,0 +1,83 @@
|
||||
/**
|
||||
* Copyright 2022 Fluence Labs Limited
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
import { Fluence, KeyPair } from '@fluencelabs/js-client';
|
||||
import { registerEchoJSService } from './compiled-aqua/main.ts';
|
||||
|
||||
// don't store your secret key in the code. This is just for the example
|
||||
const secretKey = "Iz3HUmNIB78lkNNVmMkDKrju0nCivtkJNyObrFAr774=";
|
||||
|
||||
async function main() {
|
||||
const keyPair = await KeyPair.fromEd25519SK(Buffer.from(secretKey, "base64"));
|
||||
|
||||
await Fluence.connect({
|
||||
multiaddr:
|
||||
"/ip4/127.0.0.1/tcp/9991/ws/p2p/12D3KooWBM3SdXWqGaawQDGQ6JprtwswEg3FWGvGhmgmMez1vRbR",
|
||||
peerId: "12D3KooWBM3SdXWqGaawQDGQ6JprtwswEg3FWGvGhmgmMez1vRbR",
|
||||
}, { keyPair: {
|
||||
type: 'Ed25519',
|
||||
source: keyPair.toEd25519PrivateKey()
|
||||
}});
|
||||
|
||||
const peerId = Fluence.getClient().getPeerId();
|
||||
const relayId = Fluence.getClient().getRelayPeerId();
|
||||
|
||||
console.log(`📗 created a fluence peer ${peerId} with relay ${relayId}`);
|
||||
|
||||
const serviceId = "echo";
|
||||
|
||||
// register local service with serviceId "echo"
|
||||
registerEchoJSService(serviceId, {
|
||||
echo(msg) {
|
||||
console.log(`Received message: ${msg}`);
|
||||
return `${peerId}: ${msg}`;
|
||||
},
|
||||
});
|
||||
|
||||
const resourceId = process.argv[2];
|
||||
|
||||
// don't register if resource id isn't passed
|
||||
if (resourceId === undefined) {
|
||||
console.log(
|
||||
`
|
||||
Copy this code to call this service:
|
||||
|
||||
fluence run -f 'echoJS("${peerId}", "${relayId}", "${serviceId}", "hi")'`
|
||||
);
|
||||
} else {
|
||||
// const [success, error] = await registerService(
|
||||
// resourceId,
|
||||
// "echo",
|
||||
// peerId,
|
||||
// serviceId
|
||||
// );
|
||||
// console.log(`Registration result: ${success || error}`);
|
||||
}
|
||||
|
||||
console.log("\nPress any key to stop fluence js peer");
|
||||
|
||||
// this code keeps fluence client running till any key pressed
|
||||
process.stdin.setRawMode(true);
|
||||
process.stdin.resume();
|
||||
process.stdin.on("data", async () => {
|
||||
await Fluence.disconnect();
|
||||
process.exit(0);
|
||||
});
|
||||
}
|
||||
|
||||
main().catch((error) => {
|
||||
console.error(error);
|
||||
process.exit(1);
|
||||
});
|
14
examples/archived/1-registry/src/frontend/src/relays.json
Normal file
14
examples/archived/1-registry/src/frontend/src/relays.json
Normal file
@ -0,0 +1,14 @@
|
||||
[
|
||||
{
|
||||
"multiaddr": "/ip4/127.0.0.1/tcp/9991/ws/p2p/12D3KooWJTYHn4U8jJtL1XZvTonAgv2Tn6EEbZSauw56dhr3SNKg",
|
||||
"peerId": "12D3KooWJTYHn4U8jJtL1XZvTonAgv2Tn6EEbZSauw56dhr3SNKg"
|
||||
},
|
||||
{
|
||||
"multiaddr": "/ip4/127.0.0.1/tcp/9992/ws/p2p/12D3KooWQrMQg2Ksqag5465Tnu8VQH3c4Z4NSosdS854bAsHEcwo",
|
||||
"peerId": "12D3KooWQrMQg2Ksqag5465Tnu8VQH3c4Z4NSosdS854bAsHEcwo"
|
||||
},
|
||||
{
|
||||
"multiaddr": "/ip4/127.0.0.1/tcp/9993/ws/p2p/12D3KooWQCYhkDv4jPe7ymEo8AwRNMzLZRmfyrbV53vKpVS7fZA7",
|
||||
"peerId": "12D3KooWQCYhkDv4jPe7ymEo8AwRNMzLZRmfyrbV53vKpVS7fZA7"
|
||||
}
|
||||
]
|
23
examples/archived/1-registry/src/frontend/tsconfig.json
Normal file
23
examples/archived/1-registry/src/frontend/tsconfig.json
Normal file
@ -0,0 +1,23 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2020",
|
||||
"useDefineForClassFields": true,
|
||||
"module": "ESNext",
|
||||
"lib": ["ES2020", "DOM", "DOM.Iterable"],
|
||||
"skipLibCheck": true,
|
||||
|
||||
/* Bundler mode */
|
||||
"moduleResolution": "bundler",
|
||||
"allowImportingTsExtensions": true,
|
||||
"resolveJsonModule": true,
|
||||
"isolatedModules": true,
|
||||
"noEmit": true,
|
||||
|
||||
/* Linting */
|
||||
"strict": true,
|
||||
"noUnusedLocals": true,
|
||||
"noUnusedParameters": true,
|
||||
"noFallthroughCasesInSwitch": true
|
||||
},
|
||||
"include": ["src"]
|
||||
}
|
@ -0,0 +1,14 @@
|
||||
[package]
|
||||
name = "echo_service"
|
||||
version = "0.1.0"
|
||||
edition = "2018"
|
||||
|
||||
[[bin]]
|
||||
name = "echo_service"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
marine-rs-sdk = "0.10.2"
|
||||
|
||||
[dev-dependencies]
|
||||
marine-rs-sdk-test = "=0.12.0"
|
@ -0,0 +1,11 @@
|
||||
# yaml-language-server: $schema=../../../../../.fluence/schemas/module.json
|
||||
|
||||
# Defines [Marine Module](https://fluence.dev/docs/build/concepts/#modules). You can use `fluence module new` command to generate a template for new module
|
||||
|
||||
# Documentation: https://github.com/fluencelabs/cli/tree/main/docs/configs/module.md
|
||||
|
||||
version: 0
|
||||
|
||||
type: rust
|
||||
|
||||
name: echo_service
|
@ -0,0 +1,11 @@
|
||||
use marine_rs_sdk::marine;
|
||||
use marine_rs_sdk::module_manifest;
|
||||
|
||||
module_manifest!();
|
||||
|
||||
pub fn main() {}
|
||||
|
||||
#[marine]
|
||||
pub fn echo(msg: String) -> String {
|
||||
format!("{}: {}", marine_rs_sdk::get_call_parameters().host_id, msg)
|
||||
}
|
@ -0,0 +1,13 @@
|
||||
# yaml-language-server: $schema=../../../.fluence/schemas/service.json
|
||||
|
||||
# Defines a [Marine service](https://fluence.dev/docs/build/concepts/#services), most importantly the modules that the service consists of. You can use `fluence service new` command to generate a template for new service
|
||||
|
||||
# Documentation: https://github.com/fluencelabs/cli/tree/main/docs/configs/service.md
|
||||
|
||||
version: 0
|
||||
|
||||
name: echo_service
|
||||
|
||||
modules:
|
||||
facade:
|
||||
get: modules/echo_service
|
BIN
images/availability.png
Normal file
BIN
images/availability.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 99 KiB |
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user