transition to registry (#82)

This commit is contained in:
Aleksey Proshutisnkiy 2022-02-24 16:37:58 +03:00 committed by GitHub
parent 845705df98
commit ab31e61d1c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
43 changed files with 10103 additions and 2986 deletions

View File

@ -16,7 +16,7 @@ jobs:
sudo bash .github/download_marine.sh
- restore_cache:
keys:
- aqua-dht05-{{ checksum "service/Cargo.lock" }}
- registry05-{{ checksum "service/Cargo.lock" }}
- run: |
cd ./service
rustup target add wasm32-wasi
@ -28,7 +28,7 @@ jobs:
paths:
- ~/.cargo
- ~/.rustup
key: aqua-dht05-{{ checksum "service/Cargo.lock" }}
key: registry05-{{ checksum "service/Cargo.lock" }}
workflows:
version: 2

View File

@ -60,13 +60,13 @@ jobs:
- name: Install Rust
working-directory: ./service
run: |
rustup toolchain install nightly-2021-04-24-x86_64-unknown-linux-gnu
rustup default nightly-2021-04-24-x86_64-unknown-linux-gnu
rustup override set nightly-2021-04-24-x86_64-unknown-linux-gnu
rustup target add wasm32-wasi --toolchain nightly-2021-04-24-x86_64-unknown-linux-gnu
rustup toolchain install nightly-2022-01-16-x86_64-unknown-linux-gnu
rustup default nightly-2022-01-16-x86_64-unknown-linux-gnu
rustup override set nightly-2022-01-16-x86_64-unknown-linux-gnu
rustup target add wasm32-wasi --toolchain nightly-2022-01-16-x86_64-unknown-linux-gnu
### Build
- name: Build aqua-dht.wasm
- name: Build registry.wasm
working-directory: ./service
run: ./build.sh
@ -93,10 +93,10 @@ jobs:
id: release
uses: softprops/action-gh-release@v1
with:
name: aqua-dht ${{ env.RELEASE_VERSION }}
name: registry ${{ env.RELEASE_VERSION }}
tag_name: ${{ env.RELEASE_VERSION }}
files: |
aqua-dht.tar.gz
registry.tar.gz
body: ${{steps.changelog.outputs.changelog}}
draft: false
prerelease: false
@ -127,7 +127,7 @@ jobs:
release_id: "${{ steps.release.outputs.id }}",
});
console.dir(assets);
let package = assets.data.find((a) => a.name === 'aqua-dht.tar.gz');
let package = assets.data.find((a) => a.name === 'registry.tar.gz');
let url = package.browser_download_url;
console.log("URL: " + url);
return url;
@ -138,9 +138,9 @@ jobs:
- name: Calculate SHA256
run: |
du -hs aqua-dht.tar.gz
echo $(sha256sum aqua-dht.tar.gz)
echo "SHA256=$(sha256sum aqua-dht.tar.gz | awk '{ print $1 }')" >> $GITHUB_ENV
du -hs registry.tar.gz
echo $(sha256sum registry.tar.gz)
echo "SHA256=$(sha256sum registry.tar.gz | awk '{ print $1 }')" >> $GITHUB_ENV
- name: Update version in node-distro repo
uses: benc-uk/workflow-dispatch@v1
@ -150,7 +150,7 @@ jobs:
ref: 'main'
token: ${{ secrets.PERSONAL_TOKEN }}
inputs: '{
"name": "aqua-dht",
"name": "registry",
"version": "${{ env.RELEASE_VERSION }}",
"url": "${{ steps.package-url.outputs.result }}",
"sha256": "${{ env.SHA256 }}"
@ -162,4 +162,4 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
console.dir(core);
core.info("aqua-dht was updated to ${{ env.RELEASE_VERSION }} in node-distro repo");
core.info("registry was updated to ${{ env.RELEASE_VERSION }} in node-distro repo");

2
.gitignore vendored
View File

@ -1,7 +1,7 @@
service/target
builtin-package/*.wasm
builtin-package/scheduled/*.air
aqua-dht.tar.gz
registry.tar.gz
**/*.rs.bk
**/.idea

View File

@ -1,4 +1,4 @@
# aqua-dht
# Registry
[Distributed Hash Table](https://en.wikipedia.org/wiki/Distributed_hash_table) (DHT) implementation for the Fluence network with an Aqua interface.
@ -8,12 +8,12 @@ See [Aqua Book](https://fluence.dev/aqua-book/libraries/aqua-dht).
## How to Use
See [example](./example):
- How to call [`aqua-dht`](./example/src/example.ts) function in TS/JS
- Writing an Aqua script using `aqua-dht`: [event_example.aqua](./example/src/aqua/event_example.aqua)
- How to call [`registry`](./example/src/example.ts) function in TS/JS
- Writing an Aqua script using `registry`: [event_example.aqua](./example/src/aqua/event_example.aqua)
## API
API is defined in the [pubsub.aqua](./aqua/pubsub.aqua) module.
API is defined in the [routing.aqua](./aqua/routing.aqua) module.
## Learn Aqua

View File

@ -1,26 +0,0 @@
module AquaDHT.Scheduled declares *
export clearExpired_86400, replicate_3600
import "dht.aqua"
import "@fluencelabs/aqua-lib/builtin.aqua"
-- clears expired records
func clearExpired_86400():
on HOST_PEER_ID:
t <- Peer.timestamp_sec()
AquaDHT.clear_expired(t)
-- get all old records and replicate it by keys
func replicate_3600():
on HOST_PEER_ID:
t <- Peer.timestamp_sec()
res <- AquaDHT.evict_stale(t)
for r <- res.results par:
k <- Op.string_to_b58(r.key.key)
nodes <- Kademlia.neighborhood(k, nil, nil)
for n <- nodes par:
on n:
tt <- Peer.timestamp_sec()
AquaDHT.republish_key(r.key, tt)
AquaDHT.republish_values(r.key.key, r.records, tt)

View File

@ -1,82 +0,0 @@
module AquaDHT declares *
data ClearExpiredResult:
success: bool
error: string
count_keys: u64
count_values: u64
data DhtResult:
success: bool
error: string
data Key:
key: string
peer_id: string
timestamp_created: u64
pinned: bool
weight: u32
data Record:
value: string
peer_id: string
set_by: string
relay_id: []string
service_id: []string
timestamp_created: u64
weight: u32
data EvictStaleItem:
key: Key
records: []Record
data EvictStaleResult:
success: bool
error: string
results: []EvictStaleItem
data GetKeyMetadataResult:
success: bool
error: string
key: Key
data GetValuesResult:
success: bool
error: string
result: []Record
data MergeResult:
success: bool
error: string
result: []Record
data PutHostValueResult:
success: bool
error: string
key: string
value: []Record
data RepublishValuesResult:
success: bool
error: string
updated: u64
service AquaDHT("aqua-dht"):
clear_expired(current_timestamp_sec: u64) -> ClearExpiredResult
clear_host_value(key: string, current_timestamp_sec: u64) -> DhtResult
evict_stale(current_timestamp_sec: u64) -> EvictStaleResult
get_key_metadata(key: string, current_timestamp_sec: u64) -> GetKeyMetadataResult
get_values(key: string, current_timestamp_sec: u64) -> GetValuesResult
merge(records: [][]Record) -> MergeResult
merge_hack_get_values(records: []GetValuesResult) -> MergeResult
merge_two(a: []Record, b: []Record) -> MergeResult
propagate_host_value(set_host_value: PutHostValueResult, current_timestamp_sec: u64, weight: u32) -> DhtResult
put_host_value(key: string, value: string, current_timestamp_sec: u64, relay_id: []string, service_id: []string, weight: u32) -> PutHostValueResult
put_value(key: string, value: string, current_timestamp_sec: u64, relay_id: []string, service_id: []string, weight: u32) -> DhtResult
register_key(key: string, current_timestamp_sec: u64, pin: bool, weight: u32) -> DhtResult
renew_host_value(key: string, current_timestamp_sec: u64) -> DhtResult
republish_key(key: Key, current_timestamp_sec: u64) -> DhtResult
republish_values(key: string, records: []Record, current_timestamp_sec: u64) -> RepublishValuesResult
set_expired_timeout(timeout_sec: u64)
set_host_expired_timeout(timeout_sec: u64)
set_stale_timeout(timeout_sec: u64)

56
aqua/package-lock.json generated
View File

@ -1,8 +1,46 @@
{
"name": "@fluencelabs/aqua-dht",
"name": "@fluencelabs/registry",
"version": "0.2.0",
"lockfileVersion": 1,
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@fluencelabs/registry",
"version": "0.2.0",
"license": "MIT",
"dependencies": {
"@fluencelabs/aqua-lib": "^0.3.4",
"@fluencelabs/trust-graph": "^3.0.0"
},
"devDependencies": {
"@fluencelabs/aqua": "0.3.1-228"
}
},
"node_modules/@fluencelabs/aqua": {
"version": "0.3.1-228",
"resolved": "https://registry.npmjs.org/@fluencelabs/aqua/-/aqua-0.3.1-228.tgz",
"integrity": "sha512-uTgCNa+tBhUyl18Ejq0CpoJxxD3CsOI4+BU64Hdkm9LNE9nu94k8JDYtTKKIAJdhk4kQ3qHy6HHtwVSWTPDPHQ==",
"dev": true,
"bin": {
"aqua": "index.js",
"aqua-cli": "error.js",
"aqua-j": "index-java.js"
}
},
"node_modules/@fluencelabs/aqua-lib": {
"version": "0.3.4",
"resolved": "https://registry.npmjs.org/@fluencelabs/aqua-lib/-/aqua-lib-0.3.4.tgz",
"integrity": "sha512-DH7TZgflTJDxK18URlELnHI65jYtZf7b5e25gjRL9AiT/nvdodQqSvCcdVKK/jvhPy9q3RXM/rOcHfqh5mmpSQ=="
},
"node_modules/@fluencelabs/trust-graph": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/@fluencelabs/trust-graph/-/trust-graph-3.0.0.tgz",
"integrity": "sha512-eV3LH+vKakivDCq6K4oC0xeZNwFJMtZDoIebtLXiJwHxx4zxtWUXCuVTH4T5zFrCHl4K74BVEFZ4lB13bu9E9g==",
"dependencies": {
"@fluencelabs/aqua-lib": "^0.3.1"
}
}
},
"dependencies": {
"@fluencelabs/aqua": {
"version": "0.3.1-228",
@ -11,9 +49,17 @@
"dev": true
},
"@fluencelabs/aqua-lib": {
"version": "0.1.14",
"resolved": "https://registry.npmjs.org/@fluencelabs/aqua-lib/-/aqua-lib-0.1.14.tgz",
"integrity": "sha512-H2Q4gIvociUxc4J2mwmH0D+mrU2N2Z+enKCHgBCanMVEE2wZDsZ80GTbDKsQjEq+gpqbnJIk8lJBYW6lyvLJTg=="
"version": "0.3.4",
"resolved": "https://registry.npmjs.org/@fluencelabs/aqua-lib/-/aqua-lib-0.3.4.tgz",
"integrity": "sha512-DH7TZgflTJDxK18URlELnHI65jYtZf7b5e25gjRL9AiT/nvdodQqSvCcdVKK/jvhPy9q3RXM/rOcHfqh5mmpSQ=="
},
"@fluencelabs/trust-graph": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/@fluencelabs/trust-graph/-/trust-graph-3.0.0.tgz",
"integrity": "sha512-eV3LH+vKakivDCq6K4oC0xeZNwFJMtZDoIebtLXiJwHxx4zxtWUXCuVTH4T5zFrCHl4K74BVEFZ4lB13bu9E9g==",
"requires": {
"@fluencelabs/aqua-lib": "^0.3.1"
}
}
}
}

View File

@ -1,12 +1,13 @@
{
"name": "@fluencelabs/aqua-dht",
"version": "0.2.0",
"description": "Aqua DHT library",
"name": "@fluencelabs/registry",
"version": "0.1.0",
"description": "Aqua Registry library",
"files": [
"*.aqua"
],
"dependencies": {
"@fluencelabs/aqua-lib": "^0.1.14"
"@fluencelabs/aqua-lib": "^0.3.4",
"@fluencelabs/trust-graph": "^3.0.0"
},
"scripts": {
"compile-aqua": "aqua -i . -o ./target/typescript",
@ -15,7 +16,7 @@
},
"repository": {
"type": "git",
"url": "git+https://github.com/fluencelabs/aqua-dht.git",
"url": "git+https://github.com/fluencelabs/registry.git",
"directory": "aqua"
},
"keywords": [
@ -25,9 +26,9 @@
"author": "Fluence Labs",
"license": "MIT",
"bugs": {
"url": "https://github.com/fluencelabs/aqua-dht/issues"
"url": "https://github.com/fluencelabs/registry/issues"
},
"homepage": "https://github.com/fluencelabs/aqua-dht",
"homepage": "https://github.com/fluencelabs/registry",
"devDependencies": {
"@fluencelabs/aqua": "0.3.1-228"
}

View File

@ -1,123 +0,0 @@
module AquaDHT.PubSub declares *
import "dht.aqua"
import "@fluencelabs/aqua-lib/builtin.aqua"
-- Get peers closest to the topic's hash in Kademlia network
-- These peers are expected to store list of subscribers of this topic
func getNeighbours(topic: string) -> []PeerId:
k <- Op.string_to_b58(topic)
nodes <- Kademlia.neighborhood(k, nil, nil)
<- nodes
-- If this peer have set node_id as a subscriber for topic,
-- this call will prevent subscriber from re-subscribing
-- so that eventually it will disappear from the subscribers list
func removeSubscriber(topic: string):
on HOST_PEER_ID:
t <- Peer.timestamp_sec()
AquaDHT.clear_host_value(topic, t)
-- Create a topic: register it on the closest peers
func initTopic(topic: string):
on HOST_PEER_ID:
nodes <- getNeighbours(topic)
for n <- nodes par:
on n:
try:
t <- Peer.timestamp_sec()
AquaDHT.register_key(topic, t, false, 0)
-- Create a topic and subscribe to it
-- %init_peer_id% (current client) will become a subscriber
func initTopicAndSubscribe(topic: string, value: string, relay_id: ?PeerId, service_id: ?string):
on HOST_PEER_ID:
nodes <- getNeighbours(topic)
for n <- nodes par:
on n:
try:
t <- Peer.timestamp_sec()
AquaDHT.register_key(topic, t, false, 0)
AquaDHT.put_value(topic, value, t, relay_id, service_id, 0)
-- Create a topic and subscribe to it
-- %init_peer_id% (current client) will become a subscriber
-- In contrast with non-blocking version, waits for at least a single write to succeed
func initTopicAndSubscribeBlocking(
topic: string, value: string,
relay_id: ?PeerId, service_id: ?string,
progress: string -> ()
) -> DhtResult:
results: *DhtResult
on HOST_PEER_ID:
nodes <- getNeighbours(topic)
for n <- nodes par:
on n:
try:
t <- Peer.timestamp_sec()
AquaDHT.register_key(topic, t, false, 0)
result <- AquaDHT.put_value(topic, value, t, relay_id, service_id, 0)
if result.success:
results <<- result
progress(n)
<- results!0
-- Create a topic and make the given node a subscriber to it
func initTopicAndSubscribeNode(subscriber_node_id: PeerId, topic: string, value: string, service_id: ?string):
on subscriber_node_id:
t <- Peer.timestamp_sec()
AquaDHT.register_key(topic, t, false, 0)
r <- AquaDHT.put_host_value(topic, value, t, nil, service_id, 0)
nodes <- getNeighbours(topic)
for n <- nodes par:
on n:
try:
tt <- Peer.timestamp_sec()
AquaDHT.register_key(topic, tt, false, 0)
AquaDHT.propagate_host_value(r, tt, 0)
-- Subscribe to a topic
-- Note: topic must be already initiated
func subscribe(topic: string, value: string, relay_id: ?PeerId, service_id: ?string):
on HOST_PEER_ID:
nodes <- getNeighbours(topic)
for n <- nodes par:
on n:
try:
t <- Peer.timestamp_sec()
AquaDHT.put_value(topic, value, t, relay_id, service_id, 0)
-- Subscribe a node to the given topic
-- Note: topic must be already initiated
func subscribeNode(subscriber_node_id: PeerId, topic: string, value: string, service_id: ?string):
on subscriber_node_id:
t <- Peer.timestamp_sec()
r <- AquaDHT.put_host_value(topic, value, t, nil, service_id, 0)
nodes <- getNeighbours(topic)
for n <- nodes par:
on n:
try:
tt <- Peer.timestamp_sec()
AquaDHT.register_key(topic, tt, false, 0)
AquaDHT.propagate_host_value(r, tt, 0)
-- Find the list of subscribers for the given topic
func findSubscribers(topic: string) -> []Record:
on HOST_PEER_ID:
nodes <- getNeighbours(topic)
res: *GetValuesResult
for n <- nodes par:
on n:
try:
t <- Peer.timestamp_sec()
res <- AquaDHT.get_values(topic, t)
v <- AquaDHT.merge_two(res!.result, res!1.result)
<- v.result
-- Execute the given code on subscribers
-- Note that you can provide another Aqua function as an argument to this one
func executeOnSubscribers(topic: string, call: Record -> ()):
subs <- findSubscribers(topic)
for r <- subs par:
on r.peer_id via r.relay_id:
call(r)

54
aqua/registry-api.aqua Normal file
View File

@ -0,0 +1,54 @@
import "registry.aqua"
import PeerId, Peer from "@fluencelabs/aqua-lib/builtin.aqua"
import "@fluencelabs/trust-graph/trust-graph.aqua"
-- hack
data SignResult:
success: bool
error: string
signature: []u8
service Sig("sig"):
sign(data: []u8) -> SignResult
func get_key_signature(label: string, timestamp_created: u64) -> []u8:
on HOST_PEER_ID:
bytes <- Registry.get_key_bytes(label, nil, timestamp_created, nil, "")
signature <- Sig.sign(bytes)
<- signature.signature
func get_record_signature(key_id: string, value: string, relay_id: ?PeerId, service_id: ?string, timestamp_created: u64) -> []u8:
on HOST_PEER_ID:
bytes <- Registry.get_record_bytes(key_id, value, relay_id, service_id, timestamp_created, nil)
signature <- Sig.sign(bytes)
<- signature.signature
func get_host_record_signature(key_id: string, value: string, relay_id: ?PeerId, service_id: ?string, timestamp_created: u64) -> []u8:
on HOST_PEER_ID:
bytes <- Registry.get_host_record_bytes(key_id, value, relay_id, service_id, timestamp_created, nil)
signature <- Sig.sign(bytes)
<- signature.signature
func register_key(label: string, timestamp_created: u64, signature: []u8, pin: bool) -> RegisterKeyResult:
t <- Peer.timestamp_sec()
weight <- TrustGraph.get_weight(%init_peer_id%, t)
result <- Registry.register_key(label, nil, timestamp_created, nil, "", signature, pin, weight, t)
<- result
func put_record(key_id: string, value: string, relay_id: ?PeerId, service_id: []string, timestamp_created: u64, signature: []u8) -> DhtResult:
t <- Peer.timestamp_sec()
weight <- TrustGraph.get_weight(%init_peer_id%, t)
result <- Registry.put_record(key_id, value, relay_id, service_id, timestamp_created, nil, signature, weight, t)
<- result
func put_host_record(key_id: string, value: string, relay_id: ?PeerId, service_id: []string, timestamp_created: u64, signature: []u8) -> PutHostRecordResult:
t <- Peer.timestamp_sec()
weight <- TrustGraph.get_weight(%init_peer_id%, t)
result <- Registry.put_host_record(key_id, value, relay_id, service_id, timestamp_created, nil, signature, weight, t)
<- result
func propagate_host_record(res: PutHostRecordResult) -> DhtResult:
t <- Peer.timestamp_sec()
weight <- TrustGraph.get_weight(%init_peer_id%, t)
result <- Registry.propagate_host_record(res, t, weight)
<- result

View File

@ -0,0 +1,32 @@
module Registry.Scheduled declares *
export clearExpired_86400, replicate_3600
import "registry.aqua"
import "@fluencelabs/aqua-lib/builtin.aqua"
import "@fluencelabs/trust-graph/trust-graph.aqua"
-- clears expired records
func clearExpired_86400():
on HOST_PEER_ID:
t <- Peer.timestamp_sec()
Registry.clear_expired(t)
-- get all old records and replicate it by keys
func replicate_3600():
on HOST_PEER_ID:
t <- Peer.timestamp_sec()
res <- Registry.evict_stale(t)
for r <- res.results par:
k <- Op.string_to_b58(r.key.key_id)
nodes <- Kademlia.neighborhood(k, nil, nil)
for n <- nodes par:
on n:
tt <- Peer.timestamp_sec()
key_weight <- TrustGraph.get_weight(r.key.peer_id, tt)
Registry.republish_key(r.key, key_weight, tt)
records_weights: *WeightResult
for record <- r.records:
records_weights <- TrustGraph.get_weight(record.peer_id, tt)
Registry.republish_records(r.records, records_weights, tt)

97
aqua/registry.aqua Normal file
View File

@ -0,0 +1,97 @@
module Registry declares *
data ClearExpiredResult:
success: bool
error: string
count_keys: u64
count_values: u64
data DhtResult:
success: bool
error: string
data Key:
key_id: string
label: string
peer_id: string
timestamp_created: u64
challenge: []u8
challenge_type: string
signature: []u8
data Record:
key_id: string
value: string
peer_id: string
set_by: string
relay_id: []string
service_id: []string
timestamp_created: u64
solution: []u8
signature: []u8
data EvictStaleItem:
key: Key
records: []Record
data EvictStaleResult:
success: bool
error: string
results: []EvictStaleItem
data GetKeyMetadataResult:
success: bool
error: string
key: Key
data GetValuesResult:
success: bool
error: string
result: []Record
data MergeResult:
success: bool
error: string
result: []Record
data PutHostRecordResult:
success: bool
error: string
value: []Record
data RegisterKeyResult:
success: bool
error: string
key_id: string
data RepublishValuesResult:
success: bool
error: string
updated: u64
data WeightResult:
success: bool
weight: u32
peer_id: string
error: string
service Registry("registry"):
clear_expired(current_timestamp_sec: u64) -> ClearExpiredResult
clear_host_record(key_id: string, current_timestamp_sec: u64) -> DhtResult
evict_stale(current_timestamp_sec: u64) -> EvictStaleResult
get_host_record_bytes(key_id: string, value: string, relay_id: []string, service_id: []string, timestamp_created: u64, solution: []u8) -> []u8
get_key_bytes(label: string, peer_id: []string, timestamp_created: u64, challenge: []u8, challenge_type: string) -> []u8
get_key_id(label: string, peer_id: string) -> string
get_key_metadata(key_id: string, current_timestamp_sec: u64) -> GetKeyMetadataResult
get_record_bytes(key_id: string, value: string, relay_id: []string, service_id: []string, timestamp_created: u64, solution: []u8) -> []u8
get_records(key_id: string, current_timestamp_sec: u64) -> GetValuesResult
merge(records: [][]Record) -> MergeResult
merge_two(a: []Record, b: []Record) -> MergeResult
propagate_host_record(set_host_value: PutHostRecordResult, current_timestamp_sec: u64, weight: WeightResult) -> DhtResult
put_host_record(key_id: string, value: string, relay_id: []string, service_id: []string, timestamp_created: u64, solution: []u8, signature: []u8, weight: WeightResult, current_timestamp_sec: u64) -> PutHostRecordResult
put_record(key_id: string, value: string, relay_id: []string, service_id: []string, timestamp_created: u64, solution: []u8, signature: []u8, weight: WeightResult, current_timestamp_sec: u64) -> DhtResult
register_key(key: string, peer_id: []string, timestamp_created: u64, challenge: []u8, challenge_type: string, signature: []u8, pin: bool, weight: WeightResult, current_timestamp_sec: u64) -> RegisterKeyResult
republish_key(key: Key, weight: WeightResult, current_timestamp_sec: u64) -> DhtResult
republish_records(records: []Record, weights: []WeightResult, current_timestamp_sec: u64) -> RepublishValuesResult
set_expired_timeout(timeout_sec: u64)
set_stale_timeout(timeout_sec: u64)

165
aqua/routing.aqua Normal file
View File

@ -0,0 +1,165 @@
module Registry.Routing declares *
import "registry.aqua"
import "registry-api.aqua"
import "@fluencelabs/aqua-lib/builtin.aqua"
alias RouteId: string
func get_route_id(label: string, peer_id: string) -> RouteId:
route_id <- Registry.get_key_id(label, peer_id)
<- route_id
-- Get peers closest to the label's hash in Kademlia network
-- These peers are expected to store list of subscribers of this label
func getNeighbours(route_id: string) -> []PeerId:
k <- Op.string_to_b58(route_id)
nodes <- Kademlia.neighborhood(k, nil, nil)
<- nodes
-- If this peer have set node_id as a subscriber for label,
-- this call will prevent subscriber from re-subscribing
-- so that eventually it will disappear from the subscribers list
func removeFromRoute(route_id: string):
on HOST_PEER_ID:
t <- Peer.timestamp_sec()
Registry.clear_host_record(route_id, t)
-- Create a route: register it on the closest peers
func createRoute(label: string) -> RouteId:
t <- Peer.timestamp_sec()
signature <- get_key_signature(label, t)
on HOST_PEER_ID:
route_id <- get_route_id(label, %init_peer_id%)
nodes <- getNeighbours(route_id)
for n <- nodes par:
on n:
try:
result <- register_key(label, t, signature, false)
<- route_id
-- Create a label and subscribe to it
-- %init_peer_id% (current client) will become a subscriber
func createRouteAndRegister(label: string, value: string, relay_id: ?PeerId, service_id: ?string) -> string:
t <- Peer.timestamp_sec()
key_signature <- get_key_signature(label, t)
on HOST_PEER_ID:
route_id <- get_route_id(label, %init_peer_id%)
record_signature <- get_record_signature(route_id, value, relay_id, service_id, t)
on HOST_PEER_ID:
nodes <- getNeighbours(route_id)
for n <- nodes par:
on n:
try:
register_key(label, t, key_signature, false)
put_record(route_id, value, relay_id, service_id, t, record_signature)
<- route_id
-- Create a label and subscribe to it
-- %init_peer_id% (current client) will become a subscriber
-- In contrast with non-blocking version, waits for at least a single write to succeed
func createRouteAndRegisterBlocking(
label: string, value: string,
relay_id: ?PeerId, service_id: ?string,
progress: string -> (),
ack: i16
) -> string:
t <- Peer.timestamp_sec()
key_signature <- get_key_signature(label, t)
on HOST_PEER_ID:
route_id <- get_route_id(label, %init_peer_id%)
record_signature <- get_record_signature(route_id, value, relay_id, service_id, t)
results: *DhtResult
on HOST_PEER_ID:
nodes <- getNeighbours(route_id)
for n <- nodes par:
on n:
try:
res1 <- register_key(label, t, key_signature, false)
result <- put_record(route_id, value, relay_id, service_id, t, record_signature)
if result.success:
results <<- result
progress(n)
join results[ack]
<- route_id
-- Create a label and make the given node a subscriber to it
func createRouteAndRegisterNode(subscriber_node_id: PeerId, label: string, value: string, service_id: ?string) -> string:
t <- Peer.timestamp_sec()
key_signature <- get_key_signature(label, t)
on HOST_PEER_ID:
route_id <- get_route_id(label, %init_peer_id%)
record_signature <- get_host_record_signature(route_id, value, nil, service_id, t)
on subscriber_node_id:
register_key(label, t, key_signature, false)
r <- put_host_record(route_id, value, nil, service_id, t, record_signature)
nodes <- getNeighbours(route_id)
for n <- nodes par:
on n:
try:
register_key(label, t, key_signature, false)
propagate_host_record(r)
<- route_id
-- Subscribe to a label
-- Note: label must be already initiated
func registerForRoute(route_id: string, value: string, relay_id: ?PeerId, service_id: ?string):
t <- Peer.timestamp_sec()
record_signature <- get_record_signature(route_id, value, relay_id, service_id, t)
on HOST_PEER_ID:
nodes <- getNeighbours(route_id)
for n <- nodes par:
on n:
try:
put_record(route_id, value, relay_id, service_id, t, record_signature)
-- Subscribe a node to the given label
-- Note: label must be already initiated
func registerForRouteNode(subscriber_node_id: PeerId, label: string, value: string, service_id: ?string):
t <- Peer.timestamp_sec()
key_signature <- get_key_signature(label, t)
on HOST_PEER_ID:
route_id <- get_route_id(label, %init_peer_id%)
record_signature <- get_host_record_signature(route_id, value, nil, service_id, t)
on subscriber_node_id:
r <- put_host_record(route_id, value, nil, service_id, t, record_signature)
nodes <- getNeighbours(route_id)
for n <- nodes par:
on n:
try:
register_key(label, t, key_signature, false)
propagate_host_record(r)
-- Find the list of record for the given route_id
func resolveRoute(route_id: string, ack: i16) -> []Record:
on HOST_PEER_ID:
nodes <- getNeighbours(route_id)
res: *[]Record
for n <- nodes par:
on n:
try:
t <- Peer.timestamp_sec()
get_result <- Registry.get_records(route_id, t)
res <<- get_result.result
join res[ack]
--par Peer.timeout(100000000, "timeout")
result <- Registry.merge(res)
<- result.result
-- Execute the given code on subscribers
-- Note that you can provide another Aqua function as an argument to this one
func executeOnRoute(route_id: string, ack: i16, call: Record -> ()):
subs <- resolveRoute(route_id, ack)
for r <- subs par:
on r.peer_id via r.relay_id:
call(r)

View File

@ -1,7 +0,0 @@
{
"name": "aqua-dht",
"dependencies": [
"name:sqlite3",
"name:aqua-dht"
]
}

View File

@ -9,7 +9,7 @@ SCHEDULED="${SCRIPT_DIR}/scheduled"
(
echo "*** compile scheduled scripts ***"
cd ../aqua
npx aqua --no-relay --air -i ./dht-scheduled-scripts.aqua -o "$SCHEDULED"
npx aqua --no-relay --air -i ./registry-scheduled-scripts.aqua -o "$SCHEDULED"
)
(
@ -21,9 +21,9 @@ SCHEDULED="${SCRIPT_DIR}/scheduled"
(
echo "*** create builtin distribution package ***"
cd ..
mv builtin-package aqua-dht
tar --exclude="package.sh" -f aqua-dht.tar.gz -zcv ./aqua-dht
mv aqua-dht builtin-package
mv builtin-package registry
tar --exclude="package.sh" -f registry.tar.gz -zcv ./registry
mv registry builtin-package
)
echo "*** done ***"

View File

@ -1,5 +1,5 @@
{
"name": "aqua-dht",
"name": "registry",
"mem_page_count": 1,
"preopened_files": [
"/tmp"

6413
example/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
{
"name": "example",
"version": "1.0.0",
"description": "An example of how to use AquaDHT in TypeScript",
"description": "An example of how to use Registry in TypeScript",
"main": "dist/example.js",
"scripts": {
"compile-aqua": "aqua -i ./src/aqua -o ./src/generated",
@ -17,10 +17,14 @@
"author": "Fluence Labs",
"license": "MIT",
"dependencies": {
"@fluencelabs/aqua-dht": "../aqua",
"@fluencelabs/aqua-lib": "0.1.14",
"@fluencelabs/aqua": "0.3.1-228",
"@fluencelabs/fluence": "^0.12.0",
"@fluencelabs/fluence-network-environment": "^1.0.10"
"@fluencelabs/registry": "../aqua",
"@fluencelabs/aqua-lib": "^0.4.0",
"@fluencelabs/aqua": "0.6.1-279",
"@fluencelabs/fluence": "0.20.1",
"@fluencelabs/fluence-network-environment": "^1.0.13",
"@fluencelabs/trust-graph": "^3.0.0"
},
"devDependencies": {
"typescript": "^4.4.3"
}
}

View File

@ -1,10 +1,9 @@
-- This file demonstrates how to send events to subscribers of a topic
-- Detailed explanation can be found in the Aqua Book: https://doc.fluence.dev/aqua-book/libraries/aqua-dht#passing-data-to-subscribers
import "@fluencelabs/aqua-dht/pubsub.aqua"
import "@fluencelabs/aqua-dht/dht.aqua"
alias PeerId: string
import "@fluencelabs/registry/routing.aqua"
import "@fluencelabs/registry/registry.aqua"
import PeerId from "@fluencelabs/aqua-lib/builtin.aqua"
-- Application event
data Event:
@ -24,10 +23,10 @@ func call_subscriber(sub: Record, event: Event):
SubscriberAPI.receive_event(event)
-- send event to every subscriber
func send_everyone(topic: string, event: Event):
func send_everyone(route_id: string, event: Event, ack: i16):
on HOST_PEER_ID:
-- retrieve all subscribers of a topic
subscribers <- findSubscribers(topic)
subscribers <- resolveRoute(route_id, ack)
-- iterate through them
for sub <- subscribers par:
call_subscriber(sub, event)

View File

@ -1,3 +1,9 @@
import initTopicAndSubscribeBlocking, findSubscribers from "@fluencelabs/aqua-dht/pubsub.aqua"
module Export
import createRouteAndRegisterBlocking, resolveRoute from "@fluencelabs/registry/routing.aqua"
import Peer from "@fluencelabs/aqua-lib/builtin.aqua"
export createRouteAndRegisterBlocking, resolveRoute, timestamp_sec
export initTopicAndSubscribeBlocking, findSubscribers
func timestamp_sec() -> u64:
result <- Peer.timestamp_sec()
<- result

View File

@ -1,27 +1,53 @@
import { Fluence } from "@fluencelabs/fluence";
import { krasnodar } from "@fluencelabs/fluence-network-environment";
import { initTopicAndSubscribeBlocking, findSubscribers } from "./generated/export";
import {Fluence, KeyPair} from "@fluencelabs/fluence";
import { krasnodar, Node } from "@fluencelabs/fluence-network-environment";
import {createRouteAndRegisterBlocking, resolveRoute, timestamp_sec} from "./generated/export";
let local: Node[] = [
{
peerId: "12D3KooWHBG9oaVx4i3vi6c1rSBUm7MLBmyGmmbHoZ23pmjDCnvK",
multiaddr:
"/ip4/127.0.0.1/tcp/9990/ws/p2p/12D3KooWHBG9oaVx4i3vi6c1rSBUm7MLBmyGmmbHoZ23pmjDCnvK",
},
{
peerId: "12D3KooWRABanQHUn28dxavN9ZS1zZghqoZVAYtFpoN7FdtoGTFv",
multiaddr:
"/ip4/127.0.0.1/tcp/9991/ws/p2p/12D3KooWRABanQHUn28dxavN9ZS1zZghqoZVAYtFpoN7FdtoGTFv",
},
{
peerId: "12D3KooWFpQ7LHxcC9FEBUh3k4nSCC12jBhijJv3gJbi7wsNYzJ5",
multiaddr:
"/ip4/127.0.0.1/tcp/9992/ws/p2p/12D3KooWFpQ7LHxcC9FEBUh3k4nSCC12jBhijJv3gJbi7wsNYzJ5",
},
];
async function main() {
// connect to the Fluence network
await Fluence.start({ connectTo: krasnodar[1] });
let topic = "myTopic" + new Date().valueOf();
let value = "myValue";
console.log("Will create topic", topic);
// create topic (if not exists) and subscribe on it
let relay = Fluence.getStatus().relayPeerId;
await initTopicAndSubscribeBlocking(
topic, value, relay, null,
(s) => console.log(`node ${s} saved the record`)
await Fluence.start({ connectTo: local[0] });
console.log("%s", await timestamp_sec());
console.log(
"📗 created a fluence peer %s with relay %s",
Fluence.getStatus().peerId,
Fluence.getStatus().relayPeerId
);
// find other peers subscribed to that topic
let subscribers = await findSubscribers(topic);
let label = "myTopic";
let value = "myValue";
console.log("Will create topic", label);
// create route (if not exists) and register on it
let relay = Fluence.getStatus().relayPeerId;
let route_id = await createRouteAndRegisterBlocking(
label, value, relay, null,
(s) => console.log(`node ${s} saved the record`),
0
);
// find other peers on this route
console.log("let's find subscribers for %s", route_id);
let subscribers = await resolveRoute(route_id, 0);
console.log("found subscribers:", subscribers);
}
main()
.then(() => process.exit(0))
.catch(error => {
main().then(() => process.exit(0))
.catch(error => {
console.error(error);
process.exit(1);
});

1316
service/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,22 +1,26 @@
[package]
name = "aqua-dht"
version = "0.1.1"
name = "registry"
version = "0.2.0"
authors = ["Fluence Labs"]
edition = "2018"
publish = false
[[bin]]
name = "aqua-dht"
name = "registry"
path = "src/main.rs"
[dependencies]
bs58 = "0.3.1"
marine-rs-sdk = "0.6.10"
marine-sqlite-connector = "0.5.0"
marine-sqlite-connector = "0.5.2"
fstrings = "0.2.3"
boolinator = "2.4.0"
toml = "0.5.6"
serde = { version = "1.0.118", features = ["derive"] }
thiserror = "1.0.29"
sha2 = "0.10.1"
fluence-keypair = "0.6.0"
libp2p-core = { version = "0.31", default-features = false, features = [ "secp256k1" ] }
[dev-dependencies]
marine-rs-sdk-test = "0.4.0"

View File

@ -10,7 +10,7 @@ modules_dir = "artifacts/"
mapped_dirs = { "tmp" = "/tmp" }
[[module]]
name = "aqua-dht"
name = "registry"
mem_pages_count = 1
logger_enabled = false

View File

@ -17,7 +17,7 @@ use marine_rs_sdk_test::generate_marine_test_env;
use marine_rs_sdk_test::ServiceDescription;
fn main() {
let services = vec![(
"aqua-dht".to_string(),
"registry".to_string(),
ServiceDescription {
config_path: "Config.toml".to_string(),
modules_dir: Some("artifacts".to_string()),
@ -29,5 +29,7 @@ fn main() {
generate_marine_test_env(services, "marine_test_env.rs", file!());
}
println!("cargo:rerun-if-changed=src/key_api.rs");
println!("cargo:rerun-if-changed=src/record_api.rs");
println!("cargo:rerun-if-changed=src/main.rs");
}

View File

@ -4,17 +4,17 @@ set -o errexit -o nounset -o pipefail
# set current working directory to script directory to run script from everywhere
cd "$(dirname "$0")"
# build aqua-dht.wasm
# build registry.wasm
cargo update
marine build --release
# copy .wasm to artifacts
rm -f artifacts/*
mkdir -p artifacts
cp target/wasm32-wasi/release/aqua-dht.wasm artifacts/
cp target/wasm32-wasi/release/registry.wasm artifacts/
# download SQLite 3 to use in tests
curl -L https://github.com/fluencelabs/sqlite/releases/download/v0.14.0_w/sqlite3.wasm -o artifacts/sqlite3.wasm
curl -L https://github.com/fluencelabs/sqlite/releases/download/v0.15.0_w/sqlite3.wasm -o artifacts/sqlite3.wasm
# generate Aqua bindings
marine aqua artifacts/aqua-dht.wasm -s AquaDHT -i aqua-dht >../aqua/dht.aqua
marine aqua artifacts/registry.wasm -s Registry -i registry >../aqua/registry.aqua

View File

@ -17,15 +17,12 @@
use serde::{Deserialize, Serialize};
use std::fs;
use crate::defaults::{
CONFIG_FILE, DEFAULT_EXPIRED_HOST_VALUE_AGE, DEFAULT_EXPIRED_VALUE_AGE, DEFAULT_STALE_VALUE_AGE,
};
use crate::defaults::{CONFIG_FILE, DEFAULT_EXPIRED_VALUE_AGE, DEFAULT_STALE_VALUE_AGE};
#[derive(Deserialize, Serialize)]
pub struct Config {
pub expired_timeout: u64,
pub stale_timeout: u64,
pub host_expired_timeout: u64,
}
pub fn write_config(config: Config) {
@ -43,7 +40,6 @@ pub fn create_config() {
write_config(Config {
expired_timeout: DEFAULT_EXPIRED_VALUE_AGE,
stale_timeout: DEFAULT_STALE_VALUE_AGE,
host_expired_timeout: DEFAULT_EXPIRED_HOST_VALUE_AGE,
});
}
}

View File

@ -16,13 +16,15 @@
// TODO: sanitize tables' names in SQL expressions
pub static KEYS_TABLE_NAME: &str = "dht_keys";
pub static VALUES_TABLE_NAME: &str = "dht_values";
pub static KEYS_TIMESTAMPS_TABLE_NAME: &str = "dht_keys_timestamps";
pub static RECORDS_TABLE_NAME: &str = "dht_records";
pub static CONFIG_FILE: &str = "/tmp/Config.toml";
pub static DB_PATH: &str = "/tmp/dht.db";
pub static DB_PATH: &str = "/tmp/registry.db";
pub static DEFAULT_STALE_VALUE_AGE: u64 = 60 * 60;
pub static DEFAULT_EXPIRED_VALUE_AGE: u64 = 24 * 60 * 60;
pub static DEFAULT_EXPIRED_HOST_VALUE_AGE: u64 = 10 * DEFAULT_EXPIRED_VALUE_AGE;
pub static VALUES_LIMIT: usize = 32;
pub static TRUSTED_TIMESTAMP_SERVICE_ID: &str = "peer";
pub static TRUSTED_TIMESTAMP_FUNCTION_NAME: &str = "timestamp_sec";
pub static TRUSTED_WEIGHT_SERVICE_ID: &str = "trust-graph";
pub static TRUSTED_WEIGHT_FUNCTION_NAME: &str = "get_weight";

View File

@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use fluence_keypair::error::DecodingError;
use marine_sqlite_connector::Error as SqliteError;
use thiserror::Error as ThisError;
@ -26,11 +27,11 @@ pub enum ServiceError {
),
#[error("Requested key {0} does not exist")]
KeyNotExists(String),
#[error("Key {0} already exists with different peer_id")]
KeyAlreadyExists(String),
#[error("Values limit for key {0} is exceeded")]
#[error("Key {0} for {1} peer_id already exists with newer timestamp")]
KeyAlreadyExistsNewerTimestamp(String, String),
#[error("Values limit for key_d {0} is exceeded")]
ValuesLimitExceeded(String),
#[error("Host value for key {0} not found ")]
#[error("Host value for key_id {0} not found ")]
HostValueNotFound(String),
#[error("Invalid set_host_value result: success is false or value is missing")]
InvalidSetHostValueResult,
@ -44,4 +45,36 @@ pub enum ServiceError {
"Invalid set_host_value tetraplet: you should use put_host_value to pass set_host_value: {0}"
)]
InvalidSetHostValueTetraplet(String),
#[error(
"Invalid weight tetraplet: you should use host trust-graph.get_weight to pass weight: {0}"
)]
InvalidWeightTetraplet(String),
#[error("Invalid weight peer_id: expected {0}, found {1}")]
InvalidWeightPeerId(String, String),
#[error("Invalid key {0} signature: {1}")]
InvalidKeySignature(String, #[source] fluence_keypair::error::VerificationError),
#[error("Invalid record signature for key {0} and value {1}: {2}")]
InvalidRecordSignature(
String,
String,
#[source] fluence_keypair::error::VerificationError,
),
#[error("Key can't be registered in the future")]
InvalidKeyTimestamp,
#[error("Record can't be registered in the future")]
InvalidRecordTimestamp,
#[error("Records to publish should belong to one key id")]
RecordsPublishingError,
#[error("peer id parse error: {0}")]
PeerIdParseError(String),
#[error("public key extraction from peer id failed: {0}")]
PublicKeyExtractionError(String),
#[error("{0}")]
PublicKeyDecodeError(
#[from]
#[source]
DecodingError,
),
#[error("Weight for record with peer_id {0} and set_by {1} is missing ")]
MissingRecordWeight(String, String),
}

View File

@ -1,672 +0,0 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::collections::HashMap;
use boolinator::Boolinator;
use marine_rs_sdk::CallParameters;
use marine_sqlite_connector::{Connection, Result as SqliteResult, State, Statement, Value};
use crate::config::load_config;
use crate::defaults::{
DB_PATH, KEYS_TABLE_NAME, TRUSTED_TIMESTAMP_FUNCTION_NAME, TRUSTED_TIMESTAMP_SERVICE_ID,
VALUES_LIMIT, VALUES_TABLE_NAME,
};
use crate::error::ServiceError;
use crate::error::ServiceError::{
HostValueNotFound, InternalError, InvalidSetHostValueResult, InvalidSetHostValueTetraplet,
InvalidTimestampTetraplet, KeyAlreadyExists, KeyNotExists, ValuesLimitExceeded,
};
use crate::results::{EvictStaleItem, Key, PutHostValueResult, Record};
/// Expand an "optional-by-convention" string into a vector:
/// the empty string means "absent" (0 elements), anything else is 1 element.
fn get_custom_option(value: String) -> Vec<String> {
    match value.is_empty() {
        true => Vec::new(),
        false => vec![value],
    }
}
/// Map the current row of `statement` onto a `Key`.
/// Column order must match the SELECT list:
/// (key, peer_id, timestamp_created, pinned, weight).
fn read_key(statement: &Statement) -> Result<Key, ServiceError> {
    Ok(Key {
        key: statement.read::<String>(0)?,
        peer_id: statement.read::<String>(1)?,
        // SQLite stores integers as i64; widths are narrowed on read.
        timestamp_created: statement.read::<i64>(2)? as u64,
        pinned: statement.read::<i64>(3)? != 0,
        weight: statement.read::<i64>(4)? as u32,
    })
}

/// Map the current row of `statement` onto a `Record`.
/// Column order must match the SELECT list:
/// (value, peer_id, set_by, relay_id, service_id, timestamp_created, weight).
fn read_record(statement: &Statement) -> Result<Record, ServiceError> {
    Ok(Record {
        value: statement.read::<String>(0)?,
        peer_id: statement.read::<String>(1)?,
        set_by: statement.read::<String>(2)?,
        // "" in the DB means "absent"; expand to a 0/1-element vector.
        relay_id: get_custom_option(statement.read::<String>(3)?),
        service_id: get_custom_option(statement.read::<String>(4)?),
        timestamp_created: statement.read::<i64>(5)? as u64,
        weight: statement.read::<i64>(6)? as u32,
    })
}

/// Return Ok(()) when `key` exists, Err(KeyNotExists) otherwise.
/// Note: also bumps the key's timestamp_accessed as a side effect of
/// get_key_metadata_helper.
fn check_key_existence(
    connection: &Connection,
    key: String,
    current_timestamp_sec: u64,
) -> Result<(), ServiceError> {
    get_key_metadata_helper(connection, key, current_timestamp_sec).map(|_| ())
}
/// Insert a record for `key`, replacing any existing row with the same
/// (key, peer_id, set_by) primary key.
///
/// `relay_id` / `service_id` follow the "optional-by-convention" encoding
/// (a Vec of 0 or 1 elements) and are stored as "" when absent.
fn insert_or_replace_value(
    connection: &Connection,
    key: String,
    record: Record,
    current_timestamp: u64,
) -> Result<(), ServiceError> {
    // Flatten the 0/1-element vectors into "" (absent) or the single value.
    let relay_id = record.relay_id.first().cloned().unwrap_or_default();
    let service_id = record.service_id.first().cloned().unwrap_or_default();

    let mut statement = connection.prepare(f!(
        "INSERT OR REPLACE INTO {VALUES_TABLE_NAME} VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
    ))?;

    statement.bind(1, &Value::String(key))?;
    statement.bind(2, &Value::String(record.value))?;
    statement.bind(3, &Value::String(record.peer_id))?;
    statement.bind(4, &Value::String(record.set_by))?;
    statement.bind(5, &Value::String(relay_id))?;
    statement.bind(6, &Value::String(service_id))?;
    statement.bind(7, &Value::Integer(record.timestamp_created as i64))?;
    statement.bind(8, &Value::Integer(current_timestamp as i64))?;
    statement.bind(9, &Value::Integer(record.weight as i64))?;
    statement.next().map(drop)?;

    Ok(())
}
/// Delete one record by its full (key, peer_id, set_by) primary key.
/// Deleting a missing row is not an error here; callers that care inspect
/// `connection.changes()` afterwards (see clear_host_value_impl).
fn delete_value(
    connection: &Connection,
    key: &str,
    peer_id: String,
    set_by: String,
) -> Result<(), ServiceError> {
    let mut statement = connection.prepare(f!(
        "DELETE FROM {VALUES_TABLE_NAME} WHERE key=? AND peer_id=? AND set_by=?"
    ))?;

    statement.bind(1, &Value::String(key.to_string()))?;
    statement.bind(2, &Value::String(peer_id))?;
    statement.bind(3, &Value::String(set_by))?;
    statement.next().map(drop)?;

    Ok(())
}

/// Check timestamps are generated on the current host with builtin ("peer" "timestamp_sec")
///
/// `arg_number` is the 0-based position of the timestamp argument; its first
/// tetraplet must name the trusted builtin AND have been executed on this host.
pub(crate) fn check_timestamp_tetraplets(
    call_parameters: &CallParameters,
    arg_number: usize,
) -> Result<(), ServiceError> {
    let tetraplets = call_parameters
        .tetraplets
        .get(arg_number)
        .ok_or_else(|| InvalidTimestampTetraplet(format!("{:?}", call_parameters.tetraplets)))?;
    let tetraplet = tetraplets
        .get(0)
        .ok_or_else(|| InvalidTimestampTetraplet(format!("{:?}", call_parameters.tetraplets)))?;
    // Trusted only if produced by ("peer" "timestamp_sec") on this very host.
    (tetraplet.service_id == TRUSTED_TIMESTAMP_SERVICE_ID
        && tetraplet.function_name == TRUSTED_TIMESTAMP_FUNCTION_NAME
        && tetraplet.peer_pk == call_parameters.host_id)
        .then(|| ())
        .ok_or_else(|| InvalidTimestampTetraplet(format!("{:?}", tetraplet)))
}

/// Verify that the host value passed as argument `arg_number` was produced by
/// this service's own `put_host_value` on the peer recorded in the value itself.
pub(crate) fn check_host_value_tetraplets(
    call_parameters: &CallParameters,
    arg_number: usize,
    host_value: &Record,
) -> Result<(), ServiceError> {
    let tetraplets = call_parameters
        .tetraplets
        .get(arg_number)
        .ok_or_else(|| InvalidSetHostValueTetraplet(format!("{:?}", call_parameters.tetraplets)))?;
    let tetraplet = tetraplets
        .get(0)
        .ok_or_else(|| InvalidSetHostValueTetraplet(format!("{:?}", call_parameters.tetraplets)))?;
    // NOTE(review): the service id is hard-coded as "aqua-dht"; if the service
    // alias changes (e.g. to "registry") this check must follow — confirm.
    (tetraplet.service_id == "aqua-dht"
        && tetraplet.function_name == "put_host_value"
        && tetraplet.peer_pk == host_value.peer_id)
        .then(|| ())
        .ok_or_else(|| InvalidSetHostValueTetraplet(format!("{:?}", tetraplet)))
}

/// Open the service's SQLite database at DB_PATH.
#[inline]
pub(crate) fn get_connection() -> SqliteResult<Connection> {
    marine_sqlite_connector::open(DB_PATH)
}
/// Create the keys table if it does not exist.
/// Returns `false` when the DB cannot be opened or the DDL fails — the
/// original `.unwrap()` on the connection could panic instead of reporting
/// failure through the bool this function already returns.
pub(crate) fn create_keys_table() -> bool {
    match get_connection() {
        Ok(connection) => connection
            .execute(f!("
                CREATE TABLE IF NOT EXISTS {KEYS_TABLE_NAME} (
                    key TEXT PRIMARY KEY,
                    timestamp_created INTEGER,
                    timestamp_accessed INTEGER,
                    peer_id TEXT,
                    pinned INTEGER,
                    weight INTEGER
                );
            "))
            .is_ok(),
        Err(_) => false,
    }
}
/// Create the values table if it does not exist.
/// Returns `false` when the DB cannot be opened or the DDL fails — the
/// original `.unwrap()` on the connection could panic instead of reporting
/// failure through the bool this function already returns.
pub(crate) fn create_values_table() -> bool {
    match get_connection() {
        Ok(connection) => connection
            .execute(f!("
                CREATE TABLE IF NOT EXISTS {VALUES_TABLE_NAME} (
                    key TEXT,
                    value TEXT,
                    peer_id TEXT,
                    set_by TEXT,
                    relay_id TEXT,
                    service_id TEXT,
                    timestamp_created INTEGER,
                    timestamp_accessed INTEGER,
                    weight INTEGER,
                    PRIMARY KEY (key, peer_id, set_by)
                );
            "))
            .is_ok(),
        Err(_) => false,
    }
}
/// Update timestamp_accessed and return metadata of the key
fn get_key_metadata_helper(
    connection: &Connection,
    key: String,
    current_timestamp_sec: u64,
) -> Result<Key, ServiceError> {
    // Touch the key first so even read paths refresh timestamp_accessed
    // (a no-op UPDATE when the key does not exist).
    let mut statement = connection.prepare(f!(
        "UPDATE {KEYS_TABLE_NAME} SET timestamp_accessed = ? WHERE key = ?"
    ))?;
    statement.bind(1, &Value::Integer(current_timestamp_sec as i64))?;
    statement.bind(2, &Value::String(key.clone()))?;
    statement.next()?;

    let mut statement = connection.prepare(f!(
        "SELECT key, peer_id, timestamp_created, pinned, weight \
            FROM {KEYS_TABLE_NAME} WHERE key = ?"
    ))?;
    statement.bind(1, &Value::String(key.clone()))?;

    if let State::Row = statement.next()? {
        read_key(&statement)
    } else {
        Err(KeyNotExists(key))
    }
}

/// Insert key if not exists or update timestamp if peer_id is same
fn update_key(
    connection: &Connection,
    key: String,
    peer_id: String,
    timestamp_created: u64,
    timestamp_accessed: u64,
    pin: bool,
    weight: u32,
) -> Result<(), ServiceError> {
    let old_key = get_key_metadata_helper(connection, key.clone(), timestamp_accessed);
    let pinned = pin as i32;
    // Overwriting is allowed only for the same owner with a non-regressing
    // timestamp_created; a brand-new key (lookup error) is always allowed.
    let update_allowed = {
        match old_key {
            Ok(key) => key.peer_id == peer_id && key.timestamp_created <= timestamp_created,
            Err(_) => true,
        }
    };

    if update_allowed {
        let mut statement = connection.prepare(f!("
            INSERT OR REPLACE INTO {KEYS_TABLE_NAME} VALUES (?, ?, ?, ?, ?, ?);
        "))?;
        statement.bind(1, &Value::String(key))?;
        statement.bind(2, &Value::Integer(timestamp_created as i64))?;
        statement.bind(3, &Value::Integer(timestamp_accessed as i64))?;
        statement.bind(4, &Value::String(peer_id))?;
        statement.bind(5, &Value::Integer(pinned as i64))?;
        statement.bind(6, &Value::Integer(weight as i64))?;
        statement.next()?;
        Ok(())
    } else {
        Err(KeyAlreadyExists(key))
    }
}

/// Return key metadata. `current_timestamp_sec` (arg 1) must originate from
/// the trusted host timestamp builtin (checked via its tetraplet).
pub fn get_key_metadata_impl(key: String, current_timestamp_sec: u64) -> Result<Key, ServiceError> {
    let call_parameters = marine_rs_sdk::get_call_parameters();
    check_timestamp_tetraplets(&call_parameters, 1)?;
    get_key_metadata_helper(&get_connection()?, key, current_timestamp_sec)
}

/// register new key if not exists with caller peer_id, update if exists with same peer_id or return error
pub fn register_key_impl(
    key: String,
    current_timestamp_sec: u64,
    pin: bool,
    weight: u32,
) -> Result<(), ServiceError> {
    let call_parameters = marine_rs_sdk::get_call_parameters();
    // Ownership is taken from the particle initiator, not from any argument.
    let peer_id = call_parameters.init_peer_id.clone();
    check_timestamp_tetraplets(&call_parameters, 1)?;
    update_key(
        &get_connection()?,
        key,
        peer_id,
        current_timestamp_sec,
        current_timestamp_sec,
        pin,
        weight,
    )
}

/// Used for replication, same as register_key, but key.pinned is ignored, updates timestamp_accessed
pub fn republish_key_impl(key: Key, current_timestamp_sec: u64) -> Result<(), ServiceError> {
    let call_parameters = marine_rs_sdk::get_call_parameters();
    check_timestamp_tetraplets(&call_parameters, 1)?;

    // Key.pinned is ignored in republish
    update_key(
        &get_connection()?,
        key.key,
        key.peer_id,
        key.timestamp_created,
        current_timestamp_sec,
        false,
        key.weight,
    )
}
/// Put value with caller peer_id if the key exists.
/// If the value is NOT a host value and the key already has `VALUES_LIMIT` records, then a value with the smallest weight is removed and the new value is inserted instead.
pub fn put_value_impl(
    key: String,
    value: String,
    current_timestamp_sec: u64,
    relay_id: Vec<String>,
    service_id: Vec<String>,
    weight: u32,
    host: bool,
) -> Result<Record, ServiceError> {
    let call_parameters = marine_rs_sdk::get_call_parameters();
    check_timestamp_tetraplets(&call_parameters, 2)?;
    let connection = get_connection()?;
    check_key_existence(&connection, key.clone(), current_timestamp_sec)?;

    // Host values are exempt from the limit, so only non-host rows count.
    let records_count = get_non_host_records_count_by_key(&connection, key.clone())?;

    // check values limits for non-host values
    if !host && records_count >= VALUES_LIMIT {
        let min_weight_record = get_min_weight_non_host_record_by_key(&connection, key.clone())?;

        if min_weight_record.weight < weight {
            // delete the lightest record if the new one is heavier
            delete_value(
                &connection,
                &key,
                min_weight_record.peer_id,
                min_weight_record.set_by,
            )?;
        } else {
            // return error if limit is exceeded
            return Err(ValuesLimitExceeded(key));
        }
    }

    let result = Record {
        value,
        // A host value is owned by the node itself; otherwise by the caller.
        peer_id: if host {
            call_parameters.host_id
        } else {
            call_parameters.init_peer_id.clone()
        },
        set_by: call_parameters.init_peer_id,
        relay_id,
        service_id,
        timestamp_created: current_timestamp_sec,
        weight,
    };

    insert_or_replace_value(&connection, key, result.clone(), current_timestamp_sec)?;

    Ok(result)
}

/// Return all values by key
/// Rows come back ordered by weight, heaviest first.
pub fn get_values_helper(
    connection: &Connection,
    key: String,
) -> Result<Vec<Record>, ServiceError> {
    let mut statement = connection.prepare(
        f!("SELECT value, peer_id, set_by, relay_id, service_id, timestamp_created, weight FROM {VALUES_TABLE_NAME} \
                     WHERE key = ? ORDER BY weight DESC"))?;
    statement.bind(1, &Value::String(key))?;

    let mut result: Vec<Record> = vec![];

    while let State::Row = statement.next()? {
        result.push(read_record(&statement)?)
    }

    Ok(result)
}
/// Count records for `key` that were NOT put by this host (peer_id != host_id).
fn get_non_host_records_count_by_key(
    connection: &Connection,
    key: String,
) -> Result<usize, ServiceError> {
    let host_id = marine_rs_sdk::get_call_parameters().host_id;

    // count only non-host values
    let mut statement = connection.prepare(f!(
        "SELECT COUNT(*) FROM {VALUES_TABLE_NAME} WHERE key = ? AND peer_id != ?"
    ))?;
    statement.bind(1, &Value::String(key))?;
    statement.bind(2, &Value::String(host_id))?;

    if let State::Row = statement.next()? {
        statement
            .read::<i64>(0)
            .map(|n| n as usize)
            .map_err(ServiceError::SqliteError)
    } else {
        // COUNT(*) always yields one row; reaching here indicates a driver bug.
        Err(InternalError(f!(
            "get_non_host_records_count_by_key: something went totally wrong"
        )))
    }
}

/// Return the non-host record with the smallest weight for `key`
/// (the eviction candidate used by put_value_impl).
fn get_min_weight_non_host_record_by_key(
    connection: &Connection,
    key: String,
) -> Result<Record, ServiceError> {
    let host_id = marine_rs_sdk::get_call_parameters().host_id;

    // consider only non-host values
    let mut statement = connection.prepare(
        f!("SELECT value, peer_id, set_by, relay_id, service_id, timestamp_created, weight FROM {VALUES_TABLE_NAME} \
                     WHERE key = ? AND peer_id != ? ORDER BY weight ASC LIMIT 1"))?;

    statement.bind(1, &Value::String(key.clone()))?;
    statement.bind(2, &Value::String(host_id))?;

    if let State::Row = statement.next()? {
        read_record(&statement)
    } else {
        Err(InternalError(f!(
            "not found non-host records for given key: {key}"
        )))
    }
}

/// Return all values by key and update timestamp_accessed
pub fn get_values_impl(
    key: String,
    current_timestamp_sec: u64,
) -> Result<Vec<Record>, ServiceError> {
    let call_parameters = marine_rs_sdk::get_call_parameters();
    check_timestamp_tetraplets(&call_parameters, 1)?;
    let connection = get_connection()?;
    check_key_existence(&connection, key.clone(), current_timestamp_sec)?;

    // Mark every value of the key as accessed before reading them back.
    let mut statement = connection.prepare(f!("UPDATE {VALUES_TABLE_NAME} \
                     SET timestamp_accessed = ? \
                     WHERE key = ?"))?;
    statement.bind(1, &Value::Integer(current_timestamp_sec as i64))?;
    statement.bind(2, &Value::String(key.clone()))?;
    statement.next()?;

    get_values_helper(&connection, key)
}

/// If the key exists, then merge new records with existing (last-write-wins) and put
pub fn republish_values_impl(
    key: String,
    records: Vec<Record>,
    current_timestamp_sec: u64,
) -> Result<u64, ServiceError> {
    let call_parameters = marine_rs_sdk::get_call_parameters();
    check_timestamp_tetraplets(&call_parameters, 2)?;

    republish_values_helper(key, records, current_timestamp_sec)
}

/// Merge incoming records with the stored ones (last-write-wins per
/// (peer_id, set_by)) and write the result back. Returns the number of rows
/// actually changed, accumulated via `connection.changes()`.
pub fn republish_values_helper(
    key: String,
    mut records: Vec<Record>,
    current_timestamp_sec: u64,
) -> Result<u64, ServiceError> {
    let connection = get_connection()?;

    check_key_existence(&connection, key.clone(), current_timestamp_sec)?;

    records = merge_impl(
        get_values_helper(&connection, key.clone())?
            .into_iter()
            .chain(records.into_iter())
            .collect(),
    )?;

    let mut updated = 0u64;

    for record in records.into_iter() {
        insert_or_replace_value(&connection, key.clone(), record, current_timestamp_sec)?;
        updated += connection.changes() as u64;
    }

    Ok(updated)
}
/// Remove expired values and expired empty keys.
/// Expired means that `timestamp_created` has surpassed `expired_timeout`.
/// Return number of keys and values removed
pub fn clear_expired_impl(current_timestamp_sec: u64) -> Result<(u64, u64), ServiceError> {
    let call_parameters = marine_rs_sdk::get_call_parameters();
    check_timestamp_tetraplets(&call_parameters, 0)?;
    let connection = get_connection()?;
    let config = load_config();

    // saturating_sub: a plain `-` underflows u64 when the configured timeout
    // exceeds the current timestamp (panic in debug, wraparound in release).
    // Saturating to 0 correctly means "nothing has expired yet".
    let expired_host_timestamp = current_timestamp_sec.saturating_sub(config.host_expired_timeout);
    let expired_timestamp = current_timestamp_sec.saturating_sub(config.expired_timeout);

    let mut deleted_values = 0u64;
    let host_id = call_parameters.host_id;

    // 1) drop values whose key is host-expired, then host-expired values themselves
    connection.execute(f!(
        "DELETE FROM {VALUES_TABLE_NAME} WHERE key IN (SELECT key FROM {KEYS_TABLE_NAME} \
                     WHERE timestamp_created <= {expired_host_timestamp})"
    ))?;
    deleted_values += connection.changes() as u64;
    connection.execute(f!(
        "DELETE FROM {VALUES_TABLE_NAME} WHERE timestamp_created <= {expired_host_timestamp}"
    ))?;
    deleted_values += connection.changes() as u64;

    // 2) drop non-host values past the (shorter) regular expiration
    let mut statement = connection.prepare(f!("DELETE FROM {VALUES_TABLE_NAME} WHERE key IN (SELECT key FROM {KEYS_TABLE_NAME} \
                     WHERE timestamp_created <= {expired_timestamp}) AND peer_id != ?"))?;
    statement.bind(1, &Value::String(host_id.clone()))?;
    statement.next()?;
    deleted_values += connection.changes() as u64;

    let mut statement = connection.prepare(f!("DELETE FROM {VALUES_TABLE_NAME} \
                     WHERE timestamp_created <= {expired_timestamp} AND peer_id != ?"))?;
    statement.bind(1, &Value::String(host_id.clone()))?;
    statement.next()?;
    deleted_values += connection.changes() as u64;

    // 3) drop host-expired keys unconditionally
    connection.execute(f!(
        "DELETE FROM {KEYS_TABLE_NAME} WHERE timestamp_created <= {expired_host_timestamp}"
    ))?;
    let mut deleted_keys = connection.changes() as u64;

    // 4) drop regular-expired keys unless pinned or still backed by a host value
    let mut statement = connection.prepare(f!("DELETE FROM {KEYS_TABLE_NAME} \
                     WHERE timestamp_created <= {expired_timestamp} AND pinned=0 AND \
                     key NOT IN (SELECT key FROM {VALUES_TABLE_NAME} WHERE peer_id = ?)"))?;
    statement.bind(1, &Value::String(host_id))?;
    statement.next()?;
    deleted_keys += connection.changes() as u64;

    Ok((deleted_keys, deleted_values))
}
/// Delete all stale keys and values except for pinned keys and host values.
/// Stale means that `timestamp_accessed` has surpassed `stale_timeout`.
/// Returns all deleted items
pub fn evict_stale_impl(current_timestamp_sec: u64) -> Result<Vec<EvictStaleItem>, ServiceError> {
    let call_parameters = marine_rs_sdk::get_call_parameters();
    check_timestamp_tetraplets(&call_parameters, 0)?;
    let connection = get_connection()?;

    // saturating_sub: a plain `-` underflows u64 when stale_timeout exceeds the
    // current timestamp (panic in debug, wraparound in release). Saturating to 0
    // correctly means "nothing is stale yet".
    let stale_timestamp = current_timestamp_sec.saturating_sub(load_config().stale_timeout);

    let mut stale_keys: Vec<Key> = vec![];
    let mut statement = connection.prepare(f!(
        "SELECT key, peer_id, timestamp_created, pinned, weight FROM {KEYS_TABLE_NAME} \
                     WHERE timestamp_accessed <= {stale_timestamp}"
    ))?;

    while let State::Row = statement.next()? {
        stale_keys.push(read_key(&statement)?);
    }

    let mut results: Vec<EvictStaleItem> = vec![];
    let host_id = call_parameters.host_id;
    for key in stale_keys.into_iter() {
        // Capture the values before deletion so they can be returned to the caller.
        let values = get_values_helper(&connection, key.key.clone())?;

        // Remove every value except those set by this host.
        let mut statement = connection.prepare(f!(
            "DELETE FROM {VALUES_TABLE_NAME} WHERE key = ? AND set_by != ?"
        ))?;
        statement.bind(1, &Value::String(key.key.clone()))?;
        statement.bind(2, &Value::String(host_id.clone()))?;
        statement.next()?;

        // Keep the key itself if it is pinned or still has a host-owned value.
        if !key.pinned && !values.iter().any(|val| val.peer_id == host_id) {
            let mut statement =
                connection.prepare(f!("DELETE FROM {KEYS_TABLE_NAME} WHERE key = ?"))?;
            statement.bind(1, &Value::String(key.key.clone()))?;
            statement.next()?;
        }

        results.push(EvictStaleItem {
            key,
            records: values,
        });
    }

    Ok(results)
}
/// Merge values with same peer_id by timestamp_created (last-write-wins).
/// Records are deduplicated on the (peer_id, set_by) pair; for duplicates the
/// record with the greatest timestamp_created survives.
pub fn merge_impl(records: Vec<Record>) -> Result<Vec<Record>, ServiceError> {
    let mut merged: HashMap<(String, String), Record> = HashMap::new();

    for record in records {
        let id = (record.peer_id.clone(), record.set_by.clone());
        let is_newer = match merged.get(&id) {
            Some(existing) => existing.timestamp_created < record.timestamp_created,
            None => true,
        };
        if is_newer {
            merged.insert(id, record);
        }
    }

    Ok(merged.into_values().collect())
}
/// Update timestamp_created of host value by key and caller peer_id
pub fn renew_host_value_impl(key: String, current_timestamp_sec: u64) -> Result<(), ServiceError> {
    let call_parameters = marine_rs_sdk::get_call_parameters();
    check_timestamp_tetraplets(&call_parameters, 1)?;
    let connection = get_connection()?;
    check_key_existence(&connection, key.clone(), current_timestamp_sec)?;

    // A host value is identified by (key, set_by = caller, peer_id = this host).
    let set_by = call_parameters.init_peer_id;
    let host_id = call_parameters.host_id;

    let mut statement = connection.prepare(f!("UPDATE {VALUES_TABLE_NAME} \
                     SET timestamp_created = ?, timestamp_accessed = ? \
                     WHERE key = ? AND set_by = ? AND peer_id = ?"))?;

    statement.bind(1, &Value::Integer(current_timestamp_sec as i64))?;
    statement.bind(2, &Value::Integer(current_timestamp_sec as i64))?;
    statement.bind(3, &Value::String(key.clone()))?;
    statement.bind(4, &Value::String(set_by))?;
    statement.bind(5, &Value::String(host_id))?;
    statement.next()?;

    // Exactly one row must have been touched, otherwise the host value is absent.
    (connection.changes() == 1).as_result((), HostValueNotFound(key))
}

/// Remove host value by key and caller peer_id
pub fn clear_host_value_impl(key: String, current_timestamp_sec: u64) -> Result<(), ServiceError> {
    let call_parameters = marine_rs_sdk::get_call_parameters();
    check_timestamp_tetraplets(&call_parameters, 1)?;
    let connection = get_connection()?;
    check_key_existence(&connection, key.clone(), current_timestamp_sec)?;

    let peer_id = call_parameters.host_id;
    let set_by = call_parameters.init_peer_id;

    delete_value(&connection, &key, peer_id, set_by)?;

    // delete_value itself never fails on a missing row; verify one was removed.
    (connection.changes() == 1).as_result((), HostValueNotFound(key))
}

/// Used for replication of host values to other nodes.
/// Similar to republish_values but with an additional check that value.set_by == init_peer_id
pub fn propagate_host_value_impl(
    mut set_host_value: PutHostValueResult,
    current_timestamp_sec: u64,
    weight: u32,
) -> Result<(), ServiceError> {
    // A propagated result must carry exactly one value and be marked successful.
    if !set_host_value.success || set_host_value.value.len() != 1 {
        return Err(InvalidSetHostValueResult);
    }

    let call_parameters = marine_rs_sdk::get_call_parameters();
    check_host_value_tetraplets(&call_parameters, 0, &set_host_value.value[0])?;
    check_timestamp_tetraplets(&call_parameters, 1)?;

    // The weight is recomputed locally; the sender's weight is not trusted.
    set_host_value.value[0].weight = weight;
    republish_values_helper(
        set_host_value.key,
        set_host_value.value,
        current_timestamp_sec,
    )
    .map(|_| ())
}

97
service/src/key.rs Normal file
View File

@ -0,0 +1,97 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::error::ServiceError;
use crate::misc::extract_public_key;
use fluence_keypair::Signature;
use marine_rs_sdk::marine;
use sha2::{Digest, Sha256};
#[marine]
#[derive(Default, Clone)]
pub struct Key {
    /// Derived identifier, see `Key::get_key_id` (label + peer_id concatenation).
    pub key_id: String,
    /// Human-readable name of the key.
    pub label: String,
    /// Peer that owns the key; its public key verifies `signature`.
    pub peer_id: String,
    pub timestamp_created: u64,
    /// Opaque challenge payload, covered by the signature.
    pub challenge: Vec<u8>,
    pub challenge_type: String,
    /// Signature over `signature_bytes()` produced by `peer_id`.
    pub signature: Vec<u8>,
}

/// Storage-side wrapper around `Key` carrying host-local bookkeeping that is
/// not part of the signed payload.
#[derive(Default, Clone)]
pub struct KeyInternal {
    pub key: Key,
    pub timestamp_published: u64,
    pub pinned: bool,
    pub weight: u32,
}

impl Key {
    /// Build a `Key`, deriving `key_id` from `label` and `peer_id`.
    pub fn new(
        label: String,
        peer_id: String,
        timestamp_created: u64,
        challenge: Vec<u8>,
        challenge_type: String,
        signature: Vec<u8>,
    ) -> Self {
        let key_id = Self::get_key_id(&label, &peer_id);

        Self {
            key_id,
            label,
            peer_id,
            timestamp_created,
            challenge,
            challenge_type,
            signature,
        }
    }

    /// Derive the key id as plain concatenation of label and peer_id.
    /// NOTE(review): no separator is used, so ("ab","c") and ("a","bc") collide —
    /// confirm this is acceptable; stored ids depend on this exact scheme, so
    /// changing it would invalidate existing data.
    pub fn get_key_id(key: &str, peer_id: &str) -> String {
        format!("{}{}", key, peer_id)
    }

    /// The message that is actually signed: SHA-256 over
    /// label | peer_id | timestamp_created (LE bytes) | challenge | challenge_type.
    pub fn signature_bytes(&self) -> Vec<u8> {
        let mut metadata = Vec::new();
        metadata.extend(self.label.as_bytes());
        metadata.extend(self.peer_id.as_bytes());
        metadata.extend(self.timestamp_created.to_le_bytes());
        metadata.extend(&self.challenge);
        metadata.extend(self.challenge_type.as_bytes());

        let mut hasher = Sha256::new();
        hasher.update(metadata);
        hasher.finalize().to_vec()
    }

    /// Reject keys claiming a creation time in the future, then verify the signature.
    pub fn verify(&self, current_timestamp_sec: u64) -> Result<(), ServiceError> {
        if self.timestamp_created > current_timestamp_sec {
            return Err(ServiceError::InvalidKeyTimestamp);
        }

        self.verify_signature()
    }

    /// Verify `signature` against `signature_bytes()` using the public key
    /// extracted from `peer_id`.
    pub fn verify_signature(&self) -> Result<(), ServiceError> {
        let pk = extract_public_key(self.peer_id.clone())?;
        let bytes = self.signature_bytes();
        let signature = Signature::from_bytes(pk.get_key_format(), self.signature.clone());

        pk.verify(&bytes, &signature)
            .map_err(|e| ServiceError::InvalidKeySignature(self.label.clone(), e))
    }
}

139
service/src/key_api.rs Normal file
View File

@ -0,0 +1,139 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::error::ServiceError;
use crate::key::{Key, KeyInternal};
use crate::misc::check_weight_result;
use crate::results::{DhtResult, GetKeyMetadataResult, RegisterKeyResult};
use crate::storage_impl::get_storage;
use crate::tetraplets_checkers::{check_timestamp_tetraplets, check_weight_tetraplets};
use crate::{wrapped_try, WeightResult};
use marine_rs_sdk::marine;
/// Return the bytes a client must sign to register a key with these fields.
/// `peer_id` is optional-by-convention (0 or 1 elements); when absent it
/// defaults to the calling peer.
#[marine]
pub fn get_key_bytes(
    label: String,
    mut peer_id: Vec<String>,
    timestamp_created: u64,
    challenge: Vec<u8>,
    challenge_type: String,
) -> Vec<u8> {
    // unwrap_or_else keeps the default lazy: get_call_parameters() is a host
    // call and is only made when no peer_id was supplied (the original
    // unwrap_or evaluated it unconditionally).
    let peer_id = peer_id
        .pop()
        .unwrap_or_else(|| marine_rs_sdk::get_call_parameters().init_peer_id);

    Key {
        label,
        peer_id,
        timestamp_created,
        challenge,
        challenge_type,
        ..Default::default()
    }
    .signature_bytes()
}
/// Expose the key-id derivation (label + peer_id concatenation) to callers,
/// delegating to `Key::get_key_id`.
#[marine]
pub fn get_key_id(label: String, peer_id: String) -> String {
    Key::get_key_id(&label, &peer_id)
}
/// register new key if not exists with caller peer_id, update if exists with same peer_id or return error
///
/// Argument notes:
/// - `key` is the human-readable label (becomes `Key::label`);
/// - `peer_id` is optional-by-convention (0 or 1 items), defaults to the caller;
/// - `weight` (arg 7) must originate from the trusted trust-graph service;
/// - `current_timestamp_sec` (arg 8) must originate from the host "peer" service.
#[marine]
pub fn register_key(
    key: String,
    peer_id: Vec<String>,
    timestamp_created: u64,
    challenge: Vec<u8>,
    challenge_type: String,
    signature: Vec<u8>,
    pin: bool,
    weight: WeightResult,
    current_timestamp_sec: u64,
) -> RegisterKeyResult {
    wrapped_try(|| {
        let call_parameters = marine_rs_sdk::get_call_parameters();
        check_weight_tetraplets(&call_parameters, 7, 0)?;
        check_timestamp_tetraplets(&call_parameters, 8)?;
        let peer_id = peer_id
            .get(0)
            .unwrap_or(&call_parameters.init_peer_id)
            .clone();
        // The weight must have been computed for the same peer that owns the key.
        check_weight_result(&peer_id, &weight)?;
        let key = Key::new(
            key,
            peer_id,
            timestamp_created,
            challenge,
            challenge_type,
            signature,
        );
        // Rejects future-dated keys and checks the owner's signature.
        key.verify(current_timestamp_sec)?;

        let key_id = key.key_id.clone();
        let weight = weight.weight;
        let storage = get_storage()?;
        storage.update_key_timestamp(&key.key_id, current_timestamp_sec)?;
        storage.update_key(KeyInternal {
            key,
            timestamp_published: 0,
            pinned: pin,
            weight,
        })?;

        Ok(key_id)
    })
    .into()
}

/// Return metadata of the key `key_id`, bumping its accessed timestamp.
/// `current_timestamp_sec` (arg 1) must originate from the host "peer" service.
#[marine]
pub fn get_key_metadata(key_id: String, current_timestamp_sec: u64) -> GetKeyMetadataResult {
    wrapped_try(|| {
        let call_parameters = marine_rs_sdk::get_call_parameters();
        check_timestamp_tetraplets(&call_parameters, 1)?;

        let storage = get_storage()?;
        storage.update_key_timestamp(&key_id, current_timestamp_sec)?;
        storage.get_key(key_id)
    })
    .into()
}

/// Used for replication, same as register_key, but key.pinned is ignored, updates timestamp_accessed
#[marine]
pub fn republish_key(mut key: Key, weight: WeightResult, current_timestamp_sec: u64) -> DhtResult {
    wrapped_try(|| {
        let call_parameters = marine_rs_sdk::get_call_parameters();
        check_weight_tetraplets(&call_parameters, 1, 0)?;
        check_weight_result(&key.peer_id, &weight)?;
        check_timestamp_tetraplets(&call_parameters, 2)?;
        key.verify(current_timestamp_sec)?;

        // just to be sure — recompute the derived id rather than trusting the sender
        key.key_id = Key::get_key_id(&key.label, &key.peer_id);

        let storage = get_storage()?;
        storage.update_key_timestamp(&key.key_id, current_timestamp_sec)?;
        match storage.update_key(KeyInternal {
            key,
            timestamp_published: 0,
            pinned: false,
            weight: weight.weight,
        }) {
            // we should ignore this error for republish
            Err(ServiceError::KeyAlreadyExistsNewerTimestamp(_, _)) => Ok(()),
            other => other,
        }
    })
    .into()
}

View File

@ -0,0 +1,223 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::defaults::{KEYS_TABLE_NAME, KEYS_TIMESTAMPS_TABLE_NAME};
use crate::error::ServiceError;
use crate::error::ServiceError::{InternalError, KeyNotExists};
use crate::key::{Key, KeyInternal};
use crate::storage_impl::Storage;
use marine_sqlite_connector::{State, Statement, Value};
impl Storage {
pub fn create_keys_tables(&self) -> bool {
self.connection
.execute(f!("
CREATE TABLE IF NOT EXISTS {KEYS_TABLE_NAME} (
key_id TEXT PRIMARY KEY,
label TEXT,
peer_id TEXT,
timestamp_created INTEGER,
challenge BLOB,
challenge_type TEXT,
signature BLOB NOT NULL,
timestamp_published INTEGER,
pinned INTEGER,
weight INTEGER
);
"))
.is_ok()
&& self
.connection
.execute(f!("
CREATE TABLE IF NOT EXISTS {KEYS_TIMESTAMPS_TABLE_NAME} (
key_id TEXT PRIMARY KEY,
timestamp_accessed INTEGER
);
"))
.is_ok()
}
pub fn update_key_timestamp(
&self,
key_id: &str,
current_timestamp_sec: u64,
) -> Result<(), ServiceError> {
let mut statement = self.connection.prepare(f!("
INSERT OR REPLACE INTO {KEYS_TIMESTAMPS_TABLE_NAME} VALUES (?, ?);
"))?;
statement.bind(1, &Value::String(key_id.to_string()))?;
statement.bind(2, &Value::Integer(current_timestamp_sec as i64))?;
statement.next()?;
Ok(())
}
pub fn get_key(&self, key_id: String) -> Result<Key, ServiceError> {
let mut statement = self.connection.prepare(f!(
"SELECT key_id, label, peer_id, timestamp_created, challenge, challenge_type, signature \
FROM {KEYS_TABLE_NAME} WHERE key_id = ?"
))?;
statement.bind(1, &Value::String(key_id.clone()))?;
if let State::Row = statement.next()? {
read_key(&statement)
} else {
Err(KeyNotExists(key_id))
}
}
pub fn write_key(&self, key: KeyInternal) -> Result<(), ServiceError> {
let mut statement = self.connection.prepare(f!("
INSERT OR REPLACE INTO {KEYS_TABLE_NAME} VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
"))?;
let pinned = if key.pinned { 1 } else { 0 } as i64;
statement.bind(1, &Value::String(key.key.key_id))?;
statement.bind(2, &Value::String(key.key.label))?;
statement.bind(3, &Value::String(key.key.peer_id))?;
statement.bind(4, &Value::Integer(key.key.timestamp_created as i64))?;
statement.bind(5, &Value::Binary(key.key.challenge))?;
statement.bind(6, &Value::String(key.key.challenge_type))?;
statement.bind(7, &Value::Binary(key.key.signature))?;
statement.bind(8, &Value::Integer(key.timestamp_published as i64))?;
statement.bind(9, &Value::Integer(pinned))?;
statement.bind(10, &Value::Integer(key.weight as i64))?;
statement.next()?;
Ok(())
}
pub fn update_key(&self, key: KeyInternal) -> Result<(), ServiceError> {
if let Ok(existing_key) = self.get_key(key.key.key_id.clone()) {
if existing_key.timestamp_created > key.key.timestamp_created {
return Err(ServiceError::KeyAlreadyExistsNewerTimestamp(
key.key.label,
key.key.peer_id,
));
}
}
self.write_key(key)
}
pub fn check_key_existence(&self, key_id: &str) -> Result<(), ServiceError> {
let mut statement = self.connection.prepare(f!(
"SELECT EXISTS(SELECT 1 FROM {KEYS_TABLE_NAME} WHERE key_id = ? LIMIT 1)"
))?;
statement.bind(1, &Value::String(key_id.to_string()))?;
if let State::Row = statement.next()? {
let exists = statement.read::<i64>(0)?;
if exists == 1 {
Ok(())
} else {
Err(KeyNotExists(key_id.to_string()))
}
} else {
Err(InternalError(
"EXISTS should always return something".to_string(),
))
}
}
pub fn get_stale_keys(&self, stale_timestamp: u64) -> Result<Vec<KeyInternal>, ServiceError> {
let mut statement = self.connection.prepare(f!(
"SELECT label, peer_id, timestamp_created, challenge, challenge_type, signature, timestamp_published, pinned, weight \
FROM {KEYS_TABLE_NAME} WHERE timestamp_published <= ?"
))?;
statement.bind(1, &Value::Integer(stale_timestamp as i64))?;
let mut stale_keys: Vec<KeyInternal> = vec![];
while let State::Row = statement.next()? {
stale_keys.push(read_internal_key(&statement)?);
}
Ok(stale_keys)
}
pub fn delete_key(&self, key_id: String) -> Result<(), ServiceError> {
let mut statement = self
.connection
.prepare(f!("DELETE FROM {KEYS_TABLE_NAME} WHERE key_id=?"))?;
statement.bind(1, &Value::String(key_id.clone()))?;
statement.next().map(drop)?;
if self.connection.changes() == 1 {
Ok(())
} else {
Err(KeyNotExists(key_id))
}
}
/// not pinned only
pub fn get_expired_keys(&self, expired_timestamp: u64) -> Result<Vec<Key>, ServiceError> {
let mut statement = self.connection.prepare(f!(
"SELECT label, peer_id, timestamp_created, challenge, challenge_type, signature \
FROM {KEYS_TABLE_NAME} WHERE timestamp_created <= ? and pinned != 1"
))?;
statement.bind(1, &Value::Integer(expired_timestamp as i64))?;
let mut expired_keys: Vec<Key> = vec![];
while let State::Row = statement.next()? {
let key = read_key(&statement)?;
let timestamp_accessed = self.get_key_timestamp_accessed(&key.key_id)?;
let with_host_records = self.get_host_records_count_by_key(key.key_id.clone())? != 0;
if timestamp_accessed <= expired_timestamp && !with_host_records {
expired_keys.push(key);
}
}
Ok(expired_keys)
}
pub fn get_key_timestamp_accessed(&self, key_id: &str) -> Result<u64, ServiceError> {
let mut statement = self.connection.prepare(f!(
"SELECT timestamp_accessed FROM {KEYS_TIMESTAMPS_TABLE_NAME} WHERE key_id != ?"
))?;
statement.bind(1, &Value::String(key_id.to_string()))?;
if let State::Row = statement.next()? {
statement
.read::<i64>(0)
.map(|t| t as u64)
.map_err(ServiceError::SqliteError)
} else {
Err(KeyNotExists(key_id.to_string()))
}
}
}
/// Builds a `Key` from the current result row.
///
/// Reads columns positionally, so every SELECT feeding this function must
/// list exactly: key_id, label, peer_id, timestamp_created, challenge,
/// challenge_type, signature — in that order.
pub fn read_key(statement: &Statement) -> Result<Key, ServiceError> {
    Ok(Key {
        key_id: statement.read::<String>(0)?,
        label: statement.read::<String>(1)?,
        peer_id: statement.read::<String>(2)?,
        timestamp_created: statement.read::<i64>(3)? as u64,
        challenge: statement.read::<Vec<u8>>(4)?,
        challenge_type: statement.read::<String>(5)?,
        signature: statement.read::<Vec<u8>>(6)?,
    })
}
/// Builds a `KeyInternal` from the current result row.
///
/// Delegates columns 0-6 to read_key and then reads timestamp_published (7),
/// pinned (8) and weight (9) — the SELECT must therefore return all 10 key
/// columns in table order, starting with key_id.
pub fn read_internal_key(statement: &Statement) -> Result<KeyInternal, ServiceError> {
    Ok(KeyInternal {
        key: read_key(statement)?,
        timestamp_published: statement.read::<i64>(7)? as u64,
        pinned: statement.read::<i64>(8)? != 0,
        weight: statement.read::<i64>(9)? as u32,
    })
}

View File

@ -13,169 +13,74 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use marine_rs_sdk::marine;
use marine_rs_sdk::module_manifest;
use crate::config::{create_config, load_config, write_config};
use crate::impls::{
clear_expired_impl, clear_host_value_impl, create_keys_table, create_values_table,
evict_stale_impl, get_key_metadata_impl, get_values_impl, merge_impl,
propagate_host_value_impl, put_value_impl, register_key_impl, renew_host_value_impl,
republish_key_impl, republish_values_impl,
};
use crate::results::{
ClearExpiredResult, DhtResult, EvictStaleResult, GetKeyMetadataResult, GetValuesResult, Key,
MergeResult, PutHostValueResult, Record, RepublishValuesResult,
};
use crate::results::{ClearExpiredResult, EvictStaleResult};
use crate::storage_impl::get_storage;
use crate::tetraplets_checkers::check_timestamp_tetraplets;
mod config;
mod defaults;
mod error;
mod impls;
mod key;
mod key_api;
mod key_storage_impl;
mod misc;
mod record;
mod record_api;
mod record_storage_impl;
mod results;
mod storage_impl;
mod tests;
mod tetraplets_checkers;
#[macro_use]
extern crate fstrings;
module_manifest!();
/// Immediately invokes `func` and returns its result.
///
/// Exists purely to give `?`-style early returns a scope: API handlers run
/// their fallible body inside the closure and convert the resulting `Result`
/// into a marine result type afterwards.
pub fn wrapped_try<F: FnOnce() -> T, T>(func: F) -> T {
    func()
}
// TODO: ship tg results as crate, remove duplication
#[marine]
/// Result of a weight lookup for `peer_id` — presumably produced by the
/// trust-graph ("tg") service mentioned in the TODO above; `weight` is only
/// meaningful when `success` is true.
pub struct WeightResult {
    pub success: bool,
    pub weight: u32,
    pub peer_id: String,
    pub error: String,
}
fn main() {
    // NOTE(review): this span appears to interleave pre- and post-refactor
    // lines of the diff — create_keys_table()/create_values_table() look like
    // the removed free functions superseded by the Storage methods below.
    // Confirm against the applied file.
    create_keys_table();
    create_values_table();
    let storage = get_storage().unwrap();
    storage.create_keys_tables();
    storage.create_values_table();
    create_config();
}
// KEYS
/// Registers `key` for the caller; `pin` exempts it from expiry. Thin wrapper over register_key_impl.
#[marine]
pub fn register_key(key: String, current_timestamp_sec: u64, pin: bool, weight: u32) -> DhtResult {
    register_key_impl(key, current_timestamp_sec, pin, weight).into()
}
/// Returns metadata of `key`. Thin wrapper over get_key_metadata_impl.
#[marine]
pub fn get_key_metadata(key: String, current_timestamp_sec: u64) -> GetKeyMetadataResult {
    get_key_metadata_impl(key, current_timestamp_sec).into()
}
/// Replication entry point for keys. Thin wrapper over republish_key_impl.
#[marine]
pub fn republish_key(key: Key, current_timestamp_sec: u64) -> DhtResult {
    republish_key_impl(key, current_timestamp_sec).into()
}
// VALUES
#[marine]
pub fn put_value(
key: String,
value: String,
current_timestamp_sec: u64,
relay_id: Vec<String>,
service_id: Vec<String>,
weight: u32,
) -> DhtResult {
put_value_impl(
key,
value,
current_timestamp_sec,
relay_id,
service_id,
weight,
false,
)
.map(|_| ())
/// Removes keys/records expired as of `current_timestamp_sec`.
/// NOTE(review): the #[marine] attribute is not visible in this diff span — confirm it is present in the applied file.
pub fn clear_expired(current_timestamp_sec: u64) -> ClearExpiredResult {
    wrapped_try(|| {
        let call_parameters = marine_rs_sdk::get_call_parameters();
        // the timestamp (arg #0) must come from the host's trusted source
        check_timestamp_tetraplets(&call_parameters, 0)?;
        get_storage()?.clear_expired(current_timestamp_sec)
    })
    .into()
}
/// Stores a host value for `key`, echoing `key` back in the result so it can
/// be forwarded to propagate_host_value.
#[marine]
pub fn put_host_value(
    key: String,
    value: String,
    current_timestamp_sec: u64,
    relay_id: Vec<String>,
    service_id: Vec<String>,
    weight: u32,
) -> PutHostValueResult {
    let mut result: PutHostValueResult = put_value_impl(
        key.clone(),
        value,
        current_timestamp_sec,
        relay_id,
        service_id,
        weight,
        true,
    )
    .into();

    // key is needed to be passed to propagate_host_value
    result.key = key;

    result
}
/// Replicates a previously stored host value to this node. Thin wrapper over propagate_host_value_impl.
#[marine]
pub fn propagate_host_value(
    set_host_value: PutHostValueResult,
    current_timestamp_sec: u64,
    weight: u32,
) -> DhtResult {
    propagate_host_value_impl(set_host_value, current_timestamp_sec, weight).into()
}
/// Returns all values stored for `key`. Thin wrapper over get_values_impl.
#[marine]
pub fn get_values(key: String, current_timestamp_sec: u64) -> GetValuesResult {
    get_values_impl(key, current_timestamp_sec).into()
}
/// Replication entry point for values. Thin wrapper over republish_values_impl.
#[marine]
pub fn republish_values(
    key: String,
    records: Vec<Record>,
    current_timestamp_sec: u64,
) -> RepublishValuesResult {
    republish_values_impl(key, records, current_timestamp_sec).into()
}
/// Refreshes the caller's host value timestamp for `key`. Thin wrapper over renew_host_value_impl.
#[marine]
pub fn renew_host_value(key: String, current_timestamp_sec: u64) -> DhtResult {
    renew_host_value_impl(key, current_timestamp_sec).into()
}
/// Removes the caller's host value for `key`. Thin wrapper over clear_host_value_impl.
#[marine]
pub fn clear_host_value(key: String, current_timestamp_sec: u64) -> DhtResult {
    clear_host_value_impl(key, current_timestamp_sec).into()
}
// BOTH
/// Removes expired keys and values. Thin wrapper over clear_expired_impl.
#[marine]
pub fn clear_expired(current_timestamp_sec: u64) -> ClearExpiredResult {
    clear_expired_impl(current_timestamp_sec).into()
}
/// Evicts keys/values not republished recently. Thin wrapper over evict_stale_impl.
#[marine]
pub fn evict_stale(current_timestamp_sec: u64) -> EvictStaleResult {
    evict_stale_impl(current_timestamp_sec).into()
}
/// Merges several record lists into one deduplicated list. Thin wrapper over merge_impl.
#[marine]
pub fn merge(records: Vec<Vec<Record>>) -> MergeResult {
    merge_impl(records.into_iter().flatten().collect()).into()
}
/// Merges two record lists into one deduplicated list. Thin wrapper over merge_impl.
#[marine]
pub fn merge_two(a: Vec<Record>, b: Vec<Record>) -> MergeResult {
    merge_impl(a.into_iter().chain(b.into_iter()).collect()).into()
}
#[marine]
pub fn merge_hack_get_values(records: Vec<GetValuesResult>) -> MergeResult {
merge_impl(
records
.into_iter()
.filter(|elem| elem.success)
.map(|elem| elem.result)
.flatten()
.collect(),
)
wrapped_try(|| {
let call_parameters = marine_rs_sdk::get_call_parameters();
check_timestamp_tetraplets(&call_parameters, 0)?;
get_storage()?.evict_stale(current_timestamp_sec)
})
.into()
}
@ -186,13 +91,6 @@ pub fn set_expired_timeout(timeout_sec: u64) {
write_config(config);
}
#[marine]
pub fn set_host_expired_timeout(timeout_sec: u64) {
let mut config = load_config();
config.host_expired_timeout = timeout_sec;
write_config(config);
}
#[marine]
pub fn set_stale_timeout(timeout_sec: u64) {
let mut config = load_config();

42
service/src/misc.rs Normal file
View File

@ -0,0 +1,42 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::error::ServiceError;
use crate::WeightResult;
use boolinator::Boolinator;
use fluence_keypair::PublicKey;
use libp2p_core::PeerId;
use std::convert::TryFrom;
use std::str::FromStr;
/// Parses a textual peer id into a libp2p `PeerId`, wrapping the parse
/// failure into `ServiceError::PeerIdParseError`.
fn parse_peer_id(peer_id: String) -> Result<PeerId, ServiceError> {
    peer_id
        .parse::<PeerId>()
        .map_err(|e| ServiceError::PeerIdParseError(format!("{:?}", e)))
}
/// Recovers the public key embedded in a peer id string.
///
/// Parse failures surface as `PublicKeyExtractionError`; peer ids that do
/// not carry an extractable key surface as `PublicKeyDecodeError`.
pub fn extract_public_key(peer_id: String) -> Result<PublicKey, ServiceError> {
    let parsed = parse_peer_id(peer_id)
        .map_err(|e| ServiceError::PublicKeyExtractionError(e.to_string()))?;
    PublicKey::try_from(parsed).map_err(ServiceError::PublicKeyDecodeError)
}
/// Ensures the weight lookup succeeded and was computed for `peer_id`;
/// otherwise returns `InvalidWeightPeerId` naming both peers.
pub fn check_weight_result(peer_id: &str, weight: &WeightResult) -> Result<(), ServiceError> {
    if weight.success && weight.peer_id == peer_id {
        Ok(())
    } else {
        Err(ServiceError::InvalidWeightPeerId(
            peer_id.to_string(),
            weight.peer_id.clone(),
        ))
    }
}

86
service/src/record.rs Normal file
View File

@ -0,0 +1,86 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::error::ServiceError;
use crate::misc::extract_public_key;
use fluence_keypair::Signature;
use marine_rs_sdk::marine;
use sha2::{Digest, Sha256};
#[marine]
#[derive(Debug, Default, Clone)]
pub struct Record {
    /// id of the key this record belongs to
    pub key_id: String,
    /// user-supplied payload
    pub value: String,
    /// peer the record is attributed to (host_id for host records)
    pub peer_id: String,
    /// peer that issued the record (the call's init_peer_id)
    pub set_by: String,
    /// optional relay — presumably 0 or 1 element; confirm against from_custom_option
    pub relay_id: Vec<String>,
    /// optional service id — presumably 0 or 1 element; confirm against from_custom_option
    pub service_id: Vec<String>,
    /// creation time, seconds; checked against the trusted current timestamp in verify()
    pub timestamp_created: u64,
    /// challenge solution bytes, included in the signed payload
    pub solution: Vec<u8>,
    /// signature over signature_bytes()
    pub signature: Vec<u8>,
}
/// Storage-side wrapper pairing a wire `Record` with its weight.
#[derive(Default, Debug, Clone)]
pub struct RecordInternal {
    pub record: Record,
    /// weight assigned by the weight service; not part of the signed payload
    pub weight: u32,
}
impl Record {
    /// Serializes the record's signable fields and returns their SHA-256 digest.
    ///
    /// Field order defines the signature format: key_id, value, peer_id,
    /// set_by, optional length-prefixed relay_id / service_id lists,
    /// timestamp_created (LE), solution. Any change here invalidates all
    /// previously issued signatures.
    pub fn signature_bytes(&self) -> Vec<u8> {
        let mut metadata = Vec::new();
        metadata.extend(self.key_id.as_bytes());
        metadata.extend(self.value.as_bytes());
        metadata.extend(self.peer_id.as_bytes());
        metadata.extend(self.set_by.as_bytes());

        // NOTE(review): len() is usize, so to_le_bytes() width is
        // target-dependent (4 bytes on wasm32) — confirm all signers agree
        // on the target width.
        if !self.relay_id.is_empty() {
            metadata.extend(self.relay_id.len().to_le_bytes());

            for id in &self.relay_id {
                metadata.extend(id.as_bytes());
            }
        }

        if !self.service_id.is_empty() {
            metadata.extend(self.service_id.len().to_le_bytes());

            for id in &self.service_id {
                metadata.extend(id.as_bytes());
            }
        }

        metadata.extend(self.timestamp_created.to_le_bytes());
        metadata.extend(&self.solution);

        let mut hasher = Sha256::new();
        hasher.update(metadata);
        hasher.finalize().to_vec()
    }

    /// Rejects records created in the future, then verifies the signature
    /// against the public key recovered from `peer_id`.
    pub fn verify(&self, current_timestamp_sec: u64) -> Result<(), ServiceError> {
        if self.timestamp_created > current_timestamp_sec {
            return Err(ServiceError::InvalidRecordTimestamp);
        }

        let pk = extract_public_key(self.peer_id.clone())?;
        let bytes = self.signature_bytes();
        let signature = Signature::from_bytes(pk.get_key_format(), self.signature.clone());
        pk.verify(&bytes, &signature).map_err(|e| {
            ServiceError::InvalidRecordSignature(self.key_id.clone(), self.value.clone(), e)
        })
    }
}

315
service/src/record_api.rs Normal file
View File

@ -0,0 +1,315 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use boolinator::Boolinator;
use crate::error::ServiceError;
use crate::error::ServiceError::MissingRecordWeight;
use crate::misc::check_weight_result;
use crate::record::{Record, RecordInternal};
use crate::record_storage_impl::merge_records;
use crate::results::{
DhtResult, GetValuesResult, MergeResult, PutHostRecordResult, RepublishValuesResult,
};
use crate::storage_impl::get_storage;
use crate::tetraplets_checkers::{
check_host_value_tetraplets, check_timestamp_tetraplets, check_weight_tetraplets,
};
use crate::{wrapped_try, WeightResult};
use marine_rs_sdk::marine;
/// Computes the byte payload the caller must sign to create a regular
/// record: the record as it will be stored, with a defaulted (empty)
/// signature field.
#[marine]
pub fn get_record_bytes(
    key_id: String,
    value: String,
    relay_id: Vec<String>,
    service_id: Vec<String>,
    timestamp_created: u64,
    solution: Vec<u8>,
) -> Vec<u8> {
    let init_peer_id = marine_rs_sdk::get_call_parameters().init_peer_id;
    // a regular record is both attributed to and issued by the caller
    let record = Record {
        key_id,
        value,
        peer_id: init_peer_id.clone(),
        set_by: init_peer_id,
        relay_id,
        service_id,
        timestamp_created,
        solution,
        ..Default::default()
    };
    record.signature_bytes()
}
/// Stores a caller-signed record under an existing key.
///
/// Validates tetraplets for the weight (arg #7) and timestamp (arg #8),
/// checks the weight was computed for the caller, verifies the record
/// signature, and then upserts it subject to the per-key values limit.
#[marine]
pub fn put_record(
    key_id: String,
    value: String,
    relay_id: Vec<String>,
    service_id: Vec<String>,
    timestamp_created: u64,
    solution: Vec<u8>,
    signature: Vec<u8>,
    weight: WeightResult,
    current_timestamp_sec: u64,
) -> DhtResult {
    wrapped_try(|| {
        let params = marine_rs_sdk::get_call_parameters();
        check_weight_tetraplets(&params, 7, 0)?;
        check_timestamp_tetraplets(&params, 8)?;
        check_weight_result(&params.init_peer_id, &weight)?;

        let record = Record {
            key_id,
            value,
            peer_id: params.init_peer_id.clone(),
            set_by: params.init_peer_id,
            relay_id,
            service_id,
            timestamp_created,
            solution,
            signature,
        };
        record.verify(current_timestamp_sec)?;

        let storage = get_storage()?;
        storage.check_key_existence(&record.key_id)?;

        let internal = RecordInternal {
            record,
            weight: weight.weight,
        };
        storage.update_record(internal, false).map(|_| {})
    })
    .into()
}
/// Computes the byte payload the caller must sign to create a host record:
/// identical to get_record_bytes, but the record is attributed to this node
/// (host_id) while still issued by the caller.
#[marine]
pub fn get_host_record_bytes(
    key_id: String,
    value: String,
    relay_id: Vec<String>,
    service_id: Vec<String>,
    timestamp_created: u64,
    solution: Vec<u8>,
) -> Vec<u8> {
    let params = marine_rs_sdk::get_call_parameters();
    let record = Record {
        key_id,
        value,
        peer_id: params.host_id,
        set_by: params.init_peer_id,
        relay_id,
        service_id,
        timestamp_created,
        solution,
        ..Default::default()
    };
    record.signature_bytes()
}
/// Stores a caller-signed HOST record (attributed to this node) under an
/// existing key and returns the stored record so it can be propagated.
#[marine]
pub fn put_host_record(
    key_id: String,
    value: String,
    relay_id: Vec<String>,
    service_id: Vec<String>,
    timestamp_created: u64,
    solution: Vec<u8>,
    signature: Vec<u8>,
    weight: WeightResult,
    current_timestamp_sec: u64,
) -> PutHostRecordResult {
    wrapped_try(|| {
        let cp = marine_rs_sdk::get_call_parameters();
        // weight is arg #7, timestamp is arg #8 — both must come from trusted services
        check_weight_tetraplets(&cp, 7, 0)?;
        check_timestamp_tetraplets(&cp, 8)?;
        // the weight must have been computed for the caller
        check_weight_result(&cp.init_peer_id, &weight)?;
        let record = Record {
            key_id,
            value,
            // host record: attributed to this node, issued by the caller
            peer_id: cp.host_id,
            set_by: cp.init_peer_id,
            relay_id,
            service_id,
            timestamp_created,
            solution,
            signature,
        };
        record.verify(current_timestamp_sec)?;

        let storage = get_storage()?;
        storage.check_key_existence(&record.key_id)?;
        // host == true: host records are exempt from the per-key values limit
        storage.update_record(
            RecordInternal {
                record: record.clone(),
                weight: weight.weight,
            },
            true,
        )?;
        Ok(record)
    })
    .into()
}
/// Used for replication of host values to other nodes.
/// Similar to republish_values but with an additional check that value.set_by == init_peer_id
#[marine]
pub fn propagate_host_record(
    set_host_value: PutHostRecordResult,
    current_timestamp_sec: u64,
    weight: WeightResult,
) -> DhtResult {
    wrapped_try(|| {
        // only a successful put_host_record result carrying exactly one record is acceptable
        if !set_host_value.success || set_host_value.value.len() != 1 {
            return Err(ServiceError::InvalidSetHostValueResult);
        }

        let mut record = set_host_value.value[0].clone();
        record.verify(current_timestamp_sec)?;

        let call_parameters = marine_rs_sdk::get_call_parameters();
        // arg #0 must be a genuine put_host_record result; args #1/#2 trusted timestamp/weight
        check_host_value_tetraplets(&call_parameters, 0, &record)?;
        check_timestamp_tetraplets(&call_parameters, 1)?;
        check_weight_tetraplets(&call_parameters, 2, 0)?;
        // the weight must have been computed for the record's host peer
        check_weight_result(&record.peer_id, &weight)?;
        let weight = weight.weight;
        let storage = get_storage()?;
        storage.check_key_existence(&record.key_id)?;
        storage.update_key_timestamp(&record.key_id, current_timestamp_sec)?;
        storage
            .merge_and_update_records(
                record.key_id.clone(),
                vec![RecordInternal { record, weight }],
            )
            .map(|_| ())
    })
    .into()
}
/// Returns every record stored for `key_id`, refreshing the key's
/// last-accessed timestamp as a side effect.
#[marine]
pub fn get_records(key_id: String, current_timestamp_sec: u64) -> GetValuesResult {
    wrapped_try(|| {
        let cp = marine_rs_sdk::get_call_parameters();
        // the timestamp (arg #1) must come from the host's trusted source
        check_timestamp_tetraplets(&cp, 1)?;

        let storage = get_storage()?;
        storage.check_key_existence(&key_id)?;
        storage.update_key_timestamp(&key_id, current_timestamp_sec)?;

        let records = storage.get_records(key_id)?;
        // strip the storage-internal weight wrapper before returning
        Ok(records.into_iter().map(|r| r.record).collect())
    })
    .into()
}
/// If the key exists, then merge new records with existing (last-write-wins) and put.
///
/// Every record must verify, carry a weight computed for its issuer
/// (`set_by`), and belong to the same key as the first record; the trusted
/// timestamp is arg #2 and the weights are arg #1. Returns the number of
/// stored rows.
#[marine]
pub fn republish_records(
    records: Vec<Record>,
    weights: Vec<WeightResult>,
    current_timestamp_sec: u64,
) -> RepublishValuesResult {
    wrapped_try(|| {
        if records.is_empty() {
            return Ok(0);
        }

        // all republished records must target a single key
        let key_id = records[0].key_id.clone();
        let call_parameters = marine_rs_sdk::get_call_parameters();
        check_timestamp_tetraplets(&call_parameters, 2)?;
        let mut records_to_merge = vec![];

        for (i, record) in records.into_iter().enumerate() {
            record.verify(current_timestamp_sec)?;
            check_weight_tetraplets(&call_parameters, 1, i)?;
            // ok_or_else: build the error (and its clones) only on the failure path
            let weight_result = weights.get(i).ok_or_else(|| {
                MissingRecordWeight(record.peer_id.clone(), record.set_by.clone())
            })?;
            check_weight_result(&record.set_by, weight_result)?;
            if record.key_id != key_id {
                return Err(ServiceError::RecordsPublishingError);
            }

            records_to_merge.push(RecordInternal {
                record,
                weight: weight_result.weight,
            });
        }

        let storage = get_storage()?;
        storage.check_key_existence(&key_id)?;
        storage.update_key_timestamp(&key_id, current_timestamp_sec)?;
        storage.merge_and_update_records(key_id, records_to_merge)
    })
    .into()
}
/// Removes the host record for `key_id` that was issued by the caller;
/// errors with `HostValueNotFound` when no matching row exists.
#[marine]
pub fn clear_host_record(key_id: String, current_timestamp_sec: u64) -> DhtResult {
    wrapped_try(|| {
        let cp = marine_rs_sdk::get_call_parameters();
        // the timestamp (arg #1) must come from the host's trusted source
        check_timestamp_tetraplets(&cp, 1)?;

        let storage = get_storage()?;
        storage.check_key_existence(&key_id)?;
        storage.update_key_timestamp(&key_id, current_timestamp_sec)?;

        // host records are addressed by (key, host_id, issuer)
        let host_id = cp.host_id;
        let issuer = cp.init_peer_id;
        if storage.delete_record(key_id.clone(), host_id, issuer)? {
            Ok(())
        } else {
            Err(ServiceError::HostValueNotFound(key_id))
        }
    })
    .into()
}
#[marine]
pub fn merge_two(a: Vec<Record>, b: Vec<Record>) -> MergeResult {
merge_records(
a.into_iter()
.chain(b.into_iter())
.map(|record| RecordInternal {
record,
..Default::default()
})
.collect(),
)
.map(|recs| recs.into_iter().map(|r| r.record).collect())
.into()
}
#[marine]
pub fn merge(records: Vec<Vec<Record>>) -> MergeResult {
merge_records(
records
.into_iter()
.flatten()
.map(|record| RecordInternal {
record,
..Default::default()
})
.collect(),
)
.map(|recs| recs.into_iter().map(|r| r.record).collect())
.into()
}

View File

@ -0,0 +1,280 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::collections::hash_map::Entry;
use std::collections::HashMap;

use crate::defaults::{RECORDS_TABLE_NAME, VALUES_LIMIT};
use crate::error::ServiceError;
use crate::error::ServiceError::InternalError;
use crate::record::{Record, RecordInternal};
use crate::storage_impl::{from_custom_option, get_custom_option, Storage};

use marine_sqlite_connector::{State, Statement, Value};
impl Storage {
    /// Creates the records table if it does not exist yet.
    pub fn create_values_table(&self) -> bool {
        self.connection
            .execute(f!("
            CREATE TABLE IF NOT EXISTS {RECORDS_TABLE_NAME} (
                key_id TEXT,
                value TEXT,
                peer_id TEXT,
                set_by TEXT,
                relay_id TEXT,
                service_id TEXT,
                timestamp_created INTEGER,
                solution BLOB,
                signature BLOB NOT NULL,
                weight INTEGER,
                PRIMARY KEY (key_id, peer_id, set_by)
            );
        "))
            .is_ok()
    }

    /// Put value with caller peer_id if the key exists.
    /// If the value is NOT a host value and the key already has `VALUES_LIMIT` records, then a value with the smallest weight is removed and the new value is inserted instead.
    pub fn update_record(&self, record: RecordInternal, host: bool) -> Result<(), ServiceError> {
        let records_count = self.get_non_host_records_count_by_key(record.record.key_id.clone())?;

        // check values limits for non-host values
        if !host && records_count >= VALUES_LIMIT {
            let min_weight_record =
                self.get_min_weight_non_host_record_by_key(record.record.key_id.clone())?;

            if min_weight_record.weight < record.weight
                || (min_weight_record.weight == record.weight
                    && min_weight_record.record.timestamp_created < record.record.timestamp_created)
            {
                // delete the lightest record if the new one is heavier or newer
                self.delete_record(
                    min_weight_record.record.key_id,
                    min_weight_record.record.peer_id,
                    min_weight_record.record.set_by,
                )?;
            } else {
                // return error if limit is exceeded
                return Err(ServiceError::ValuesLimitExceeded(record.record.key_id));
            }
        }

        self.write_record(record)?;
        Ok(())
    }

    /// Upserts one record row; column order matches the table definition
    /// and read_record.
    pub fn write_record(&self, record: RecordInternal) -> Result<(), ServiceError> {
        let mut statement = self.connection.prepare(f!(
            "INSERT OR REPLACE INTO {RECORDS_TABLE_NAME} VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
        ))?;

        statement.bind(1, &Value::String(record.record.key_id))?;
        statement.bind(2, &Value::String(record.record.value))?;
        statement.bind(3, &Value::String(record.record.peer_id))?;
        statement.bind(4, &Value::String(record.record.set_by))?;
        statement.bind(
            5,
            &Value::String(from_custom_option(record.record.relay_id)),
        )?;
        statement.bind(
            6,
            &Value::String(from_custom_option(record.record.service_id)),
        )?;
        statement.bind(7, &Value::Integer(record.record.timestamp_created as i64))?;
        statement.bind(8, &Value::Binary(record.record.solution))?;
        statement.bind(9, &Value::Binary(record.record.signature))?;
        statement.bind(10, &Value::Integer(record.weight as i64))?;
        statement.next().map(drop)?;

        Ok(())
    }

    /// Deletes the record addressed by (key_id, peer_id, set_by);
    /// returns true when a row was actually removed.
    pub fn delete_record(
        &self,
        key_id: String,
        peer_id: String,
        set_by: String,
    ) -> Result<bool, ServiceError> {
        let mut statement = self.connection.prepare(f!(
            "DELETE FROM {RECORDS_TABLE_NAME} WHERE key_id=? AND peer_id=? AND set_by=?"
        ))?;

        statement.bind(1, &Value::String(key_id))?;
        statement.bind(2, &Value::String(peer_id))?;
        statement.bind(3, &Value::String(set_by))?;
        statement.next().map(drop)?;

        Ok(self.connection.changes() == 1)
    }

    /// Returns the lightest non-host record for `key_id` — the eviction
    /// candidate when the per-key values limit is hit.
    fn get_min_weight_non_host_record_by_key(
        &self,
        key_id: String,
    ) -> Result<RecordInternal, ServiceError> {
        let host_id = marine_rs_sdk::get_call_parameters().host_id;

        // non-host records only.
        // FIX: `solution` was missing from the column list, which shifted every
        // following column and broke read_record (it reads 10 columns with
        // solution at index 7 and signature at index 8).
        let mut statement = self.connection.prepare(
            f!("SELECT key_id, value, peer_id, set_by, relay_id, service_id, timestamp_created, solution, signature, weight FROM {RECORDS_TABLE_NAME} \
                     WHERE key_id = ? AND peer_id != ? ORDER BY weight ASC LIMIT 1"))?;

        statement.bind(1, &Value::String(key_id.clone()))?;
        statement.bind(2, &Value::String(host_id))?;

        if let State::Row = statement.next()? {
            read_record(&statement)
        } else {
            Err(InternalError(f!(
                "not found non-host records for given key_id: {key_id}"
            )))
        }
    }

    /// Counts records for `key_id` that are NOT attributed to this host.
    fn get_non_host_records_count_by_key(&self, key: String) -> Result<usize, ServiceError> {
        let host_id = marine_rs_sdk::get_call_parameters().host_id;

        // non-host records only
        let mut statement = self.connection.prepare(f!(
            "SELECT COUNT(*) FROM {RECORDS_TABLE_NAME} WHERE key_id = ? AND peer_id != ?"
        ))?;
        statement.bind(1, &Value::String(key))?;
        statement.bind(2, &Value::String(host_id))?;

        if let State::Row = statement.next()? {
            statement
                .read::<i64>(0)
                .map(|n| n as usize)
                .map_err(ServiceError::SqliteError)
        } else {
            Err(InternalError(f!(
                "get_non_host_records_count_by_key: something went totally wrong"
            )))
        }
    }

    /// Counts records for `key_id` that ARE attributed to this host.
    pub fn get_host_records_count_by_key(&self, key_id: String) -> Result<u64, ServiceError> {
        let host_id = marine_rs_sdk::get_call_parameters().host_id;

        // host records only
        let mut statement = self.connection.prepare(f!(
            "SELECT COUNT(*) FROM {RECORDS_TABLE_NAME} WHERE key_id = ? AND peer_id = ?"
        ))?;
        statement.bind(1, &Value::String(key_id))?;
        statement.bind(2, &Value::String(host_id))?;

        if let State::Row = statement.next()? {
            statement
                .read::<i64>(0)
                .map(|n| n as u64)
                .map_err(ServiceError::SqliteError)
        } else {
            // FIX: error message previously named get_non_host_records_count_by_key
            Err(InternalError(f!(
                "get_host_records_count_by_key: something went totally wrong"
            )))
        }
    }

    /// Merges `records` with the ones already stored for `key_id`
    /// (last-write-wins) and writes the result; returns the number of
    /// affected rows.
    pub fn merge_and_update_records(
        &self,
        key_id: String,
        records: Vec<RecordInternal>,
    ) -> Result<u64, ServiceError> {
        let records = merge_records(
            self.get_records(key_id)?
                .into_iter()
                .chain(records.into_iter())
                .collect(),
        )?;

        let mut updated = 0u64;
        for record in records.into_iter() {
            self.write_record(record)?;
            updated += self.connection.changes() as u64;
        }

        Ok(updated)
    }

    /// Returns all records for `key_id`, heaviest first.
    pub fn get_records(&self, key_id: String) -> Result<Vec<RecordInternal>, ServiceError> {
        let mut statement = self.connection.prepare(
            f!("SELECT key_id, value, peer_id, set_by, relay_id, service_id, timestamp_created, solution, signature, weight FROM {RECORDS_TABLE_NAME} \
                     WHERE key_id = ? ORDER BY weight DESC"))?;
        statement.bind(1, &Value::String(key_id))?;

        let mut result: Vec<RecordInternal> = vec![];

        while let State::Row = statement.next()? {
            result.push(read_record(&statement)?)
        }

        Ok(result)
    }

    /// except host records
    pub fn clear_expired_records(&self, expired_timestamp: u64) -> Result<u64, ServiceError> {
        let host_id = marine_rs_sdk::get_call_parameters().host_id;
        // FIX: host_id was previously interpolated raw into the SQL text,
        // producing an unquoted (invalid) literal; bind it as a parameter.
        let mut statement = self.connection.prepare(f!(
            "DELETE FROM {RECORDS_TABLE_NAME} WHERE timestamp_created <= ? AND peer_id != ?"
        ))?;
        statement.bind(1, &Value::Integer(expired_timestamp as i64))?;
        statement.bind(2, &Value::String(host_id))?;
        statement.next().map(drop)?;
        Ok(self.connection.changes() as u64)
    }

    /// except host records and for pinned keys
    pub fn delete_records_by_key(&self, key_id: String) -> Result<u64, ServiceError> {
        let mut statement = self
            .connection
            .prepare(f!("DELETE FROM {RECORDS_TABLE_NAME} WHERE key_id = ?"))?;

        statement.bind(1, &Value::String(key_id))?;
        statement.next().map(drop)?;
        Ok(self.connection.changes() as u64)
    }
}
/// Builds a `RecordInternal` from the current result row.
///
/// Reads columns positionally, so every SELECT feeding this function must
/// list exactly: key_id, value, peer_id, set_by, relay_id, service_id,
/// timestamp_created, solution, signature, weight — in that order.
pub fn read_record(statement: &Statement) -> Result<RecordInternal, ServiceError> {
    Ok(RecordInternal {
        record: Record {
            key_id: statement.read::<String>(0)?,
            value: statement.read::<String>(1)?,
            peer_id: statement.read::<String>(2)?,
            set_by: statement.read::<String>(3)?,
            relay_id: get_custom_option(statement.read::<String>(4)?),
            service_id: get_custom_option(statement.read::<String>(5)?),
            timestamp_created: statement.read::<i64>(6)? as u64,
            solution: statement.read::<Vec<u8>>(7)?,
            signature: statement.read::<Vec<u8>>(8)?,
        },
        weight: statement.read::<i64>(9)? as u32,
    })
}
/// Merge values with same peer_id by timestamp_created (last-write-wins)
pub fn merge_records(records: Vec<RecordInternal>) -> Result<Vec<RecordInternal>, ServiceError> {
// key is (peer_id, set_by)
let mut result: HashMap<(String, String), RecordInternal> = HashMap::new();
for rec in records.into_iter() {
let key = (rec.record.peer_id.clone(), rec.record.set_by.clone());
if let Some(other_rec) = result.get_mut(&key) {
if other_rec.record.timestamp_created < rec.record.timestamp_created {
*other_rec = rec;
}
} else {
result.insert(key, rec);
}
}
Ok(result.into_iter().map(|(_, rec)| rec).collect())
}

View File

@ -15,6 +15,8 @@
*/
use crate::error::ServiceError;
use crate::key::Key;
use crate::record::Record;
use marine_rs_sdk::marine;
#[marine]
@ -40,15 +42,28 @@ impl From<Result<(), ServiceError>> for DhtResult {
}
#[marine]
#[derive(Debug, Default, Clone)]
pub struct Record {
pub value: String,
pub peer_id: String,
pub set_by: String,
pub relay_id: Vec<String>,
pub service_id: Vec<String>,
pub timestamp_created: u64,
pub weight: u32,
#[derive(Debug)]
/// Outcome of `register_key`, flattened for the Aqua API: carries either the
/// new `key_id` (on success) or a human-readable `error`.
pub struct RegisterKeyResult {
    pub success: bool,
    pub error: String,
    pub key_id: String,
}

impl From<Result<String, ServiceError>> for RegisterKeyResult {
    fn from(result: Result<String, ServiceError>) -> Self {
        match result {
            Ok(key_id) => Self {
                success: true,
                error: String::new(),
                key_id,
            },
            Err(err) => Self {
                success: false,
                error: err.to_string(),
                key_id: String::new(),
            },
        }
    }
}
#[marine]
@ -129,16 +144,6 @@ impl From<Result<Vec<Record>, ServiceError>> for GetStaleRecordsResult {
}
}
#[marine]
#[derive(Default, Clone)]
pub struct Key {
pub key: String,
pub peer_id: String,
pub timestamp_created: u64,
pub pinned: bool,
pub weight: u32,
}
#[marine]
pub struct GetKeyMetadataResult {
pub success: bool,
@ -217,6 +222,30 @@ impl From<Result<Vec<EvictStaleItem>, ServiceError>> for EvictStaleResult {
}
}
#[marine]
/// Outcome of `put_host_record`: `value` is the Aqua option encoding — empty
/// on failure, exactly one record on success.
pub struct PutHostRecordResult {
    pub success: bool,
    pub error: String,
    pub value: Vec<Record>,
}

impl From<Result<Record, ServiceError>> for PutHostRecordResult {
    fn from(result: Result<Record, ServiceError>) -> Self {
        match result {
            Ok(record) => Self {
                success: true,
                error: String::new(),
                value: vec![record],
            },
            Err(err) => Self {
                success: false,
                error: err.to_string(),
                value: Vec::new(),
            },
        }
    }
}
#[marine]
#[derive(Debug)]
pub struct MergeResult {
@ -241,30 +270,3 @@ impl From<Result<Vec<Record>, ServiceError>> for MergeResult {
}
}
}
#[marine]
/// Outcome of `put_host_value`. `value` uses the Aqua option encoding (0 or 1
/// records); `key` is always empty here — presumably filled in by the caller
/// downstream (TODO confirm).
pub struct PutHostValueResult {
    pub success: bool,
    pub error: String,
    pub key: String,
    pub value: Vec<Record>,
}

impl From<Result<Record, ServiceError>> for PutHostValueResult {
    fn from(result: Result<Record, ServiceError>) -> Self {
        match result {
            Ok(record) => Self {
                success: true,
                error: String::new(),
                key: String::new(),
                value: vec![record],
            },
            Err(err) => Self {
                success: false,
                error: err.to_string(),
                key: String::new(),
                value: Vec::new(),
            },
        }
    }
}

110
service/src/storage_impl.rs Normal file
View File

@ -0,0 +1,110 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::config::load_config;
use crate::defaults::DB_PATH;
use crate::error::ServiceError;
use crate::record::Record;
use crate::results::EvictStaleItem;
use marine_sqlite_connector::{Connection, Result as SqliteResult};
/// Thin wrapper over the sqlite connection; all registry queries go through it.
pub struct Storage {
    pub(crate) connection: Connection,
}
#[inline]
/// Open the service database at `DB_PATH` and wrap the connection.
pub(crate) fn get_storage() -> SqliteResult<Storage> {
    let connection = marine_sqlite_connector::open(DB_PATH)?;
    Ok(Storage { connection })
}
/// Decode a TEXT column into the Aqua option encoding:
/// an empty string becomes an empty vec (None), anything else a 1-element vec (Some).
pub fn get_custom_option(value: String) -> Vec<String> {
    Some(value).filter(|v| !v.is_empty()).into_iter().collect()
}
/// Inverse of `get_custom_option`: collapse the Aqua option encoding back into
/// a TEXT value — the first element if present, the empty string otherwise.
/// Elements beyond the first are ignored, matching the original behavior.
pub fn from_custom_option(value: Vec<String>) -> String {
    // take ownership of the first element instead of cloning it
    value.into_iter().next().unwrap_or_default()
}
impl Storage {
    /// Remove expired values and expired empty keys.
    /// Expired means that `timestamp_created` has surpassed `expired_timeout`.
    /// Returns (number of deleted keys, number of deleted values).
    pub fn clear_expired(&self, current_timestamp_sec: u64) -> Result<(u64, u64), ServiceError> {
        let config = load_config();
        // saturating_sub: a plain `-` panics (debug) or wraps (release) when the
        // configured timeout exceeds the current timestamp
        let expired_timestamp = current_timestamp_sec.saturating_sub(config.expired_timeout);
        let mut deleted_values = 0u64;
        let mut deleted_keys = 0u64;
        // delete expired non-host records
        deleted_values += self.clear_expired_records(expired_timestamp)?;
        let expired_keys = self.get_expired_keys(expired_timestamp)?;
        for key in expired_keys {
            self.delete_key(key.key_id)?;
            deleted_keys += self.connection.changes() as u64;
        }
        // TODO: clear expired timestamp accessed for keys
        Ok((deleted_keys, deleted_values))
    }

    /// Delete all stale keys and values except for pinned keys and host values.
    /// Stale means that `timestamp_accessed` has surpassed `stale_timeout`.
    /// Returns all stale items (including those kept because pinned/host-owned).
    pub fn evict_stale(
        &self,
        current_timestamp_sec: u64,
    ) -> Result<Vec<EvictStaleItem>, ServiceError> {
        // saturating_sub mirrors clear_expired: guard against u64 underflow
        let stale_timestamp = current_timestamp_sec.saturating_sub(load_config().stale_timeout);
        let stale_keys = self.get_stale_keys(stale_timestamp)?;
        let mut keys_to_delete: Vec<String> = vec![];
        let mut results: Vec<EvictStaleItem> = vec![];
        let host_id = marine_rs_sdk::get_call_parameters().host_id;
        for key in stale_keys.into_iter() {
            let records: Vec<Record> = self
                .get_records(key.key.key_id.clone())?
                .into_iter()
                .map(|r| r.record)
                .collect();
            // keep keys that are pinned or that still carry a record set by this host
            if !key.pinned && !records.iter().any(|r| r.peer_id == host_id) {
                keys_to_delete.push(key.key.key_id.clone());
            }
            results.push(EvictStaleItem {
                key: key.key,
                records,
            });
        }
        for key_id in keys_to_delete {
            self.delete_key(key_id.clone())?;
            self.delete_records_by_key(key_id)?;
        }
        Ok(results)
    }
}

File diff suppressed because it is too large Load Diff

757
service/src/tests/mod.rs Normal file
View File

@ -0,0 +1,757 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#[cfg(test)]
mod tests {
use fluence_keypair::KeyPair;
use std::fs;
use marine_rs_sdk::{CallParameters, SecurityTetraplet};
use rusqlite::Connection;
marine_rs_sdk_test::include_test_env!("/marine_test_env.rs");
use marine_test_env::registry::{DhtResult, Record, ServiceInterface};
use crate::defaults::{
CONFIG_FILE, DB_PATH, DEFAULT_STALE_VALUE_AGE, KEYS_TABLE_NAME, KEYS_TIMESTAMPS_TABLE_NAME,
RECORDS_TABLE_NAME, TRUSTED_TIMESTAMP_FUNCTION_NAME, TRUSTED_TIMESTAMP_SERVICE_ID,
TRUSTED_WEIGHT_FUNCTION_NAME, TRUSTED_WEIGHT_SERVICE_ID, VALUES_LIMIT,
};
use crate::error::ServiceError::{
InvalidKeyTimestamp, InvalidTimestampTetraplet, InvalidWeightPeerId,
KeyAlreadyExistsNewerTimestamp,
};
use crate::tests::tests::marine_test_env::registry::{Key, RegisterKeyResult, WeightResult};
const HOST_ID: &str = "some_host_id";
// Structural equality for the generated Key type so tests can assert_eq! on it.
// The original comparison skipped `challenge` and `challenge_type` even though
// tests (e.g. get_key_metadata_test) construct expected keys with them; include
// all fields so equality is not accidentally lossy.
impl PartialEq for Key {
    fn eq(&self, other: &Self) -> bool {
        self.key_id == other.key_id
            && self.label == other.label
            && self.timestamp_created == other.timestamp_created
            && self.challenge == other.challenge
            && self.challenge_type == other.challenge_type
            && self.signature == other.signature
            && self.peer_id == other.peer_id
    }
}

impl Eq for Key {}
fn clear_env() {
let connection = Connection::open(DB_PATH).unwrap();
connection
.execute(f!("DROP TABLE IF EXISTS {KEYS_TABLE_NAME}").as_str(), [])
.unwrap();
connection
.execute(
f!("DROP TABLE IF EXISTS {KEYS_TIMESTAMPS_TABLE_NAME}").as_str(),
[],
)
.unwrap();
connection
.execute(f!("DROP TABLE IF EXISTS {RECORDS_TABLE_NAME}").as_str(), [])
.unwrap();
if fs::metadata(CONFIG_FILE).is_ok() {
fs::remove_file(CONFIG_FILE).unwrap();
}
}
// Builder-style helper around marine CallParameters for tests.
struct CPWrapper {
    pub cp: CallParameters,
}
impl CPWrapper {
    /// CallParameters with the given caller and the test host id; no tetraplets.
    pub fn new(init_peer_id: &str) -> Self {
        Self {
            cp: CallParameters {
                init_peer_id: init_peer_id.to_string(),
                service_id: "".to_string(),
                service_creator_peer_id: "".to_string(),
                host_id: HOST_ID.to_string(),
                particle_id: "".to_string(),
                tetraplets: vec![],
            },
        }
    }

    /// Mark argument `arg_number` as produced by the trusted timestamp builtin.
    pub fn add_timestamp_tetraplets(mut self, arg_number: usize) -> Self {
        if self.cp.tetraplets.len() <= arg_number {
            self.cp.tetraplets.resize(arg_number + 1, vec![]);
        }
        self.cp.tetraplets[arg_number] = vec![SecurityTetraplet {
            peer_pk: HOST_ID.to_string(),
            service_id: TRUSTED_TIMESTAMP_SERVICE_ID.to_string(),
            function_name: TRUSTED_TIMESTAMP_FUNCTION_NAME.to_string(),
            json_path: "".to_string(),
        }];
        self
    }

    /// Mark argument `arg_number` as produced by the trusted weight service.
    fn add_weight_tetraplets(mut self, arg_number: usize) -> Self {
        // `<=` (was `<`): with len == arg_number the resize was skipped and the
        // index below panicked; now consistent with add_timestamp_tetraplets
        if self.cp.tetraplets.len() <= arg_number {
            self.cp.tetraplets.resize(arg_number + 1, vec![]);
        }
        self.cp.tetraplets[arg_number] = vec![SecurityTetraplet {
            peer_pk: HOST_ID.to_string(),
            service_id: TRUSTED_WEIGHT_SERVICE_ID.to_string(),
            function_name: TRUSTED_WEIGHT_FUNCTION_NAME.to_string(),
            json_path: "".to_string(),
        }];
        self
    }

    /// Clone out the accumulated CallParameters.
    pub fn get(&self) -> CallParameters {
        self.cp.clone()
    }

    /// Drop all tetraplets, keeping peer ids.
    pub fn reset(&mut self) {
        self.cp.tetraplets = vec![];
    }
}
/// Successful WeightResult stub for the given peer.
fn get_weight(peer_id: String, weight: u32) -> WeightResult {
    WeightResult {
        success: true,
        weight,
        peer_id,
        error: String::new(),
    }
}
/// Failed WeightResult stub, as produced when the weight call itself errors.
/// NOTE(review): not referenced by the visible tests — possibly dead code.
fn get_invalid_weight() -> WeightResult {
    WeightResult {
        success: false,
        weight: 0,
        peer_id: String::new(),
        error: "get_weight call failed".to_string(),
    }
}
/// Serialize the key fields through the service and sign the bytes with `kp`.
fn get_signed_key_bytes(
    registry: &mut ServiceInterface,
    kp: &KeyPair,
    label: String,
    timestamp_created: u64,
    challenge: Vec<u8>,
    challenge_type: String,
) -> Vec<u8> {
    let issuer_peer_id = kp.get_peer_id().to_base58();

    let key_bytes = registry.get_key_bytes(
        label.clone(),
        vec![issuer_peer_id.clone()],
        timestamp_created,
        challenge,
        challenge_type,
    );
    // NOTE(review): the chained .to_vec().to_vec() looks redundant — confirm the
    // return type of fluence_keypair's Signature::to_vec before simplifying
    kp.sign(&key_bytes).unwrap().to_vec().to_vec()
}
/// Sign and submit a key registration through register_key_cp, attaching
/// trusted tetraplets for the weight (arg 7) and timestamp (arg 8) arguments.
fn register_key(
    registry: &mut ServiceInterface,
    kp: &KeyPair,
    label: String,
    timestamp_created: u64,
    current_timestamp: u64,
    pin: bool,
    weight: u32,
) -> RegisterKeyResult {
    let issuer_peer_id = kp.get_peer_id().to_base58();
    let challenge = vec![];
    let challenge_type = "".to_string();
    let signature = get_signed_key_bytes(
        registry,
        kp,
        label.clone(),
        timestamp_created,
        challenge.clone(),
        challenge_type.clone(),
    );

    // tetraplet positions must match register_key_cp's argument order
    let cp = CPWrapper::new(&issuer_peer_id)
        .add_weight_tetraplets(7)
        .add_timestamp_tetraplets(8);
    // shadow the raw u32 with a trusted-looking WeightResult
    let weight = get_weight(issuer_peer_id.clone(), weight);
    registry.register_key_cp(
        label,
        vec![issuer_peer_id],
        timestamp_created,
        challenge,
        challenge_type,
        signature,
        pin,
        weight,
        current_timestamp,
        cp.get(),
    )
}
/// register_key that asserts success and returns the freshly created key_id.
fn register_key_checked(
    registry: &mut ServiceInterface,
    kp: &KeyPair,
    key: String,
    timestamp_created: u64,
    current_timestamp: u64,
    pin: bool,
    weight: u32,
) -> String {
    let result = register_key(
        registry,
        kp,
        key,
        timestamp_created,
        current_timestamp,
        pin,
        weight,
    );
    assert!(result.success, "{}", result.error);
    result.key_id
}
/// Fetch key metadata with a trusted timestamp tetraplet on argument 1,
/// asserting the call succeeded.
fn get_key_metadata(
    registry: &mut ServiceInterface,
    key_id: String,
    current_timestamp: u64,
) -> Key {
    let cp = CPWrapper::new("peer_id").add_timestamp_tetraplets(1);
    let result = registry.get_key_metadata_cp(key_id, current_timestamp, cp.get());
    assert!(result.success, "{}", result.error);
    result.key
}
/// Serialize the record fields through the service and sign the bytes with `kp`.
fn get_signed_record_bytes(
    registry: &mut ServiceInterface,
    kp: &KeyPair,
    key_id: String,
    value: String,
    relay_id: Vec<String>,
    service_id: Vec<String>,
    timestamp_created: u64,
    solution: Vec<u8>,
) -> Vec<u8> {
    let issuer_peer_id = kp.get_peer_id().to_base58();
    // `mut` removed: cp is never modified, only cloned out via get()
    let cp = CPWrapper::new(&issuer_peer_id);

    let record_bytes = registry.get_record_bytes_cp(
        key_id,
        value,
        relay_id,
        service_id,
        timestamp_created,
        solution,
        cp.get(),
    );

    // NOTE(review): the chained .to_vec().to_vec() looks redundant — confirm the
    // return type of fluence_keypair's Signature::to_vec before simplifying
    kp.sign(&record_bytes).unwrap().to_vec().to_vec()
}
/// Sign and submit a record through put_record_cp, attaching trusted
/// tetraplets for the weight (arg 7) and timestamp (arg 8) arguments.
fn put_record(
    registry: &mut ServiceInterface,
    kp: &KeyPair,
    key_id: String,
    value: String,
    relay_id: Vec<String>,
    service_id: Vec<String>,
    timestamp_created: u64,
    current_timestamp: u64,
    weight: u32,
) -> DhtResult {
    let issuer_peer_id = kp.get_peer_id().to_base58();
    let solution = vec![];
    let signature = get_signed_record_bytes(
        registry,
        kp,
        key_id.clone(),
        value.clone(),
        relay_id.clone(),
        service_id.clone(),
        timestamp_created,
        solution.clone(),
    );

    // `mut` removed: the builder chain completes before binding, cp is read-only after
    let cp = CPWrapper::new(&issuer_peer_id)
        .add_weight_tetraplets(7)
        .add_timestamp_tetraplets(8);
    // shadow the raw u32 with a trusted-looking WeightResult
    let weight = get_weight(issuer_peer_id.clone(), weight);
    registry.put_record_cp(
        key_id,
        value,
        relay_id,
        service_id,
        timestamp_created,
        solution,
        signature,
        weight,
        current_timestamp,
        cp.get(),
    )
}
/// put_record that asserts the service accepted the record.
fn put_record_checked(
    registry: &mut ServiceInterface,
    kp: &KeyPair,
    key_id: String,
    value: String,
    relay_id: Vec<String>,
    service_id: Vec<String>,
    timestamp_created: u64,
    current_timestamp: u64,
    weight: u32,
) {
    let result = put_record(
        registry,
        kp,
        key_id,
        value,
        relay_id,
        service_id,
        timestamp_created,
        current_timestamp,
        weight,
    );
    assert!(result.success, "{}", result.error);
}
/// Fetch all records for `key_id` with a trusted timestamp tetraplet on
/// argument 1, asserting the call succeeded.
fn get_records(
    registry: &mut ServiceInterface,
    key_id: String,
    current_timestamp: u64,
) -> Vec<Record> {
    let cp = CPWrapper::new("some_peer_id").add_timestamp_tetraplets(1);
    let result = registry.get_records_cp(key_id, current_timestamp, cp.get());
    assert!(result.success, "{}", result.error);
    result.result
}
#[test]
// An empty signature must be rejected even when all tetraplets are trusted.
fn register_key_invalid_signature() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();
    let issuer_peer_id = kp.get_peer_id().to_base58();
    let mut cp = CPWrapper::new(&issuer_peer_id);

    let key = "some_key".to_string();
    let timestamp_created = 0u64;
    let current_timestamp = 100u64;

    let weight = get_weight(issuer_peer_id.clone(), 0);
    let invalid_signature = vec![];

    // Weight is argument 7 and current_timestamp argument 8 of register_key_cp
    // (matching the register_key helper). The original used 5/6 and literal
    // timestamps (100/10), so the call failed on tetraplet/timestamp checks
    // instead of the signature check this test is named for.
    cp = cp.add_weight_tetraplets(7).add_timestamp_tetraplets(8);
    let reg_key_result = registry.register_key_cp(
        key,
        vec![],
        timestamp_created,
        vec![],
        "".to_string(),
        invalid_signature,
        false,
        weight,
        current_timestamp,
        cp.get(),
    );
    assert!(!reg_key_result.success);
}
#[test]
// Registration must fail when the weight argument has no trusted tetraplet
// (only the timestamp tetraplet is attached here).
fn register_key_invalid_weight_tetraplet() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();
    let issuer_peer_id = kp.get_peer_id().to_base58();
    let mut cp = CPWrapper::new(&issuer_peer_id);

    let label = "some_key".to_string();
    let timestamp_created = 0u64;
    let current_timestamp = 100u64;
    let challenge = vec![];
    let challenge_type = "".to_string();

    let weight = get_weight(issuer_peer_id.clone(), 0);
    let signature = get_signed_key_bytes(
        &mut registry,
        &kp,
        label.clone(),
        timestamp_created,
        challenge.clone(),
        challenge_type.clone(),
    );

    // deliberately omit add_weight_tetraplets
    cp = cp.add_timestamp_tetraplets(8);
    let reg_key_result = registry.register_key_cp(
        label,
        vec![],
        timestamp_created,
        challenge,
        challenge_type,
        signature,
        false,
        weight,
        current_timestamp,
        cp.get(),
    );
    assert!(!reg_key_result.success);
}
#[test]
// Registration must fail with InvalidTimestampTetraplet when the
// current_timestamp argument carries no trusted tetraplet.
fn register_key_missing_timestamp_tetraplet() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();
    let issuer_peer_id = kp.get_peer_id().to_base58();
    let label = "some_key".to_string();
    let timestamp_created = 0u64;
    let current_timestamp = 100u64;
    let weight = get_weight(issuer_peer_id.clone(), 0);
    let challenge = vec![1u8, 2u8, 3u8];
    let challenge_type = "type".to_string();
    let signature = get_signed_key_bytes(
        &mut registry,
        &kp,
        label.clone(),
        timestamp_created,
        challenge.clone(),
        challenge_type.clone(),
    );

    // deliberately omit add_timestamp_tetraplets
    let cp = CPWrapper::new(&issuer_peer_id).add_weight_tetraplets(7);
    let reg_key_result = registry.register_key_cp(
        label,
        vec![],
        timestamp_created,
        challenge,
        challenge_type,
        signature,
        false,
        weight,
        current_timestamp,
        cp.get(),
    );
    assert!(!reg_key_result.success);
    assert_eq!(
        reg_key_result.error,
        InvalidTimestampTetraplet(format!("{:?}", cp.cp.tetraplets)).to_string()
    );
}
#[test]
// Registration must fail with InvalidWeightPeerId when the weight result was
// computed for a different peer than the issuer.
fn register_key_invalid_weight_peer_id() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();
    let issuer_peer_id = kp.get_peer_id().to_base58();
    let invalid_peer_id = "INVALID_PEER_ID".to_string();
    let mut cp = CPWrapper::new(&issuer_peer_id);

    let label = "some_key".to_string();
    let timestamp_created = 0u64;
    let current_timestamp = 100u64;
    let challenge = vec![1u8, 2u8, 3u8];
    let challenge_type = "type".to_string();

    // weight issued for the wrong peer id
    let weight = get_weight(invalid_peer_id.clone(), 0);
    let signature = get_signed_key_bytes(
        &mut registry,
        &kp,
        label.clone(),
        timestamp_created,
        challenge.clone(),
        challenge_type.clone(),
    );

    cp = cp.add_weight_tetraplets(7).add_timestamp_tetraplets(8);
    let reg_key_result = registry.register_key_cp(
        label,
        vec![],
        timestamp_created,
        challenge,
        challenge_type,
        signature,
        false,
        weight,
        current_timestamp,
        cp.get(),
    );
    assert!(!reg_key_result.success);
    assert_eq!(
        reg_key_result.error,
        InvalidWeightPeerId(issuer_peer_id, invalid_peer_id).to_string()
    );
}
#[test]
// Happy path: a freshly signed key with valid tetraplets registers successfully.
fn register_key_correct() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();

    let key = "some_key".to_string();
    let timestamp_created = 0u64;
    let current_timestamp = 100u64;
    let weight = 0;
    let pin = false;

    let result = register_key(
        &mut registry,
        &kp,
        key,
        timestamp_created,
        current_timestamp,
        pin,
        weight,
    );
    assert!(result.success, "{}", result.error);
}
#[test]
// Re-registering the same key with an older timestamp_created must be rejected
// with KeyAlreadyExistsNewerTimestamp.
fn register_key_older_timestamp() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();
    let key = "some_key".to_string();
    let timestamp_created_first = 100u64;
    let current_timestamp = 1000u64;
    let weight = 0;
    let pin = false;

    register_key_checked(
        &mut registry,
        &kp,
        key.clone(),
        timestamp_created_first,
        current_timestamp,
        pin,
        weight,
    );

    let timestamp_created_second = timestamp_created_first - 10u64;
    let result_second = register_key(
        &mut registry,
        &kp,
        key.clone(),
        timestamp_created_second,
        current_timestamp,
        pin,
        weight,
    );

    assert_eq!(
        result_second.error,
        KeyAlreadyExistsNewerTimestamp(key, kp.get_peer_id().to_base58()).to_string()
    );
}
#[test]
// A key whose timestamp_created lies after the trusted current timestamp must
// be rejected with InvalidKeyTimestamp.
fn register_key_in_the_future() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();
    let key = "some_key".to_string();
    let current_timestamp = 100u64;
    let timestamp_created = current_timestamp + 100u64;
    let weight = 0;
    let pin = false;

    let result = register_key(
        &mut registry,
        &kp,
        key,
        timestamp_created,
        current_timestamp,
        pin,
        weight,
    );

    assert_eq!(result.error, InvalidKeyTimestamp.to_string())
}
#[test]
// Republishing an outdated key must be accepted but must NOT overwrite the
// newer registration: metadata stays at the newest version.
fn register_key_update_republish_old() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();
    let issuer_peer_id = kp.get_peer_id().to_base58();
    let key = "some_key".to_string();
    let timestamp_created_old = 0u64;
    let current_timestamp = 100u64;
    let weight = 0;
    let pin = false;

    let key_id = register_key_checked(
        &mut registry,
        &kp,
        key.clone(),
        timestamp_created_old,
        current_timestamp,
        pin,
        weight,
    );

    let old_key = get_key_metadata(&mut registry, key_id.clone(), current_timestamp);
    let timestamp_created_new = timestamp_created_old + 10u64;
    // second registration supersedes the first
    register_key_checked(
        &mut registry,
        &kp,
        key,
        timestamp_created_new,
        current_timestamp,
        pin,
        weight,
    );
    let new_key = get_key_metadata(&mut registry, key_id.clone(), current_timestamp);
    assert_ne!(old_key, new_key);

    let cp = CPWrapper::new(&issuer_peer_id)
        .add_weight_tetraplets(1)
        .add_timestamp_tetraplets(2);
    let weight = get_weight(issuer_peer_id.clone(), weight);
    // republish the stale copy — should be a no-op for stored metadata
    let result =
        registry.republish_key_cp(old_key.clone(), weight, current_timestamp, cp.get());
    assert!(result.success, "{}", result.error);

    let result_key = get_key_metadata(&mut registry, key_id.clone(), current_timestamp);
    assert_eq!(new_key, result_key);
}
#[test]
// Round-trip: the metadata returned by the service matches the fields the key
// was registered with, including the signature over the serialized key bytes.
fn get_key_metadata_test() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();
    let label = "some_key".to_string();
    let timestamp_created = 0u64;
    let current_timestamp = 100u64;
    let weight = 0;
    let pin = false;
    let challenge = vec![];
    let challenge_type = "".to_string();
    let issuer_peer_id = kp.get_peer_id().to_base58();

    let key_bytes = registry.get_key_bytes(
        label.clone(),
        vec![issuer_peer_id.clone()],
        timestamp_created,
        challenge.clone(),
        challenge_type.clone(),
    );
    let signature = kp.sign(&key_bytes).unwrap().to_vec().to_vec();

    let key_id = register_key_checked(
        &mut registry,
        &kp,
        label.clone(),
        timestamp_created,
        current_timestamp,
        pin,
        weight,
    );

    let result_key = get_key_metadata(&mut registry, key_id.clone(), current_timestamp);
    let expected_key = Key {
        key_id,
        label,
        peer_id: issuer_peer_id,
        timestamp_created,
        challenge,
        challenge_type,
        signature,
    };
    assert_eq!(result_key, expected_key);
}
#[test]
// Republishing the exact metadata the service returned must succeed.
fn republish_same_key_test() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();
    let issuer_peer_id = kp.get_peer_id().to_base58();
    let key = "some_key".to_string();
    let timestamp_created = 0u64;
    let current_timestamp = 100u64;
    let weight = 0;
    let pin = false;

    let key_id = register_key_checked(
        &mut registry,
        &kp,
        key.clone(),
        timestamp_created,
        current_timestamp,
        pin,
        weight,
    );

    let result_key = get_key_metadata(&mut registry, key_id.clone(), current_timestamp);
    let cp = CPWrapper::new(&issuer_peer_id)
        .add_weight_tetraplets(1)
        .add_timestamp_tetraplets(2);
    let weight = get_weight(issuer_peer_id.clone(), weight);
    let result =
        registry.republish_key_cp(result_key.clone(), weight, current_timestamp, cp.get());
    assert!(result.success, "{}", result.error);
}
#[test]
// End-to-end: register a key, put one record under it, read it back and check
// every field survived the round-trip.
fn test_put_get_record() {
    clear_env();
    let mut registry = ServiceInterface::new();
    let kp = KeyPair::generate_ed25519();
    let key = "some_key".to_string();
    let timestamp_created = 0u64;
    let current_timestamp = 100u64;
    let weight = 0;
    let pin = false;

    let key_id = register_key_checked(
        &mut registry,
        &kp,
        key,
        timestamp_created,
        current_timestamp,
        pin,
        weight,
    );

    let value = "some_value".to_string();
    let relay_id = vec!["some_relay".to_string()];
    let service_id = vec!["some_service_id".to_string()];
    let weight = 5u32;

    put_record_checked(
        &mut registry,
        &kp,
        key_id.clone(),
        value.clone(),
        relay_id.clone(),
        service_id.clone(),
        timestamp_created,
        current_timestamp,
        weight,
    );

    let records = get_records(&mut registry, key_id.clone(), current_timestamp);
    assert_eq!(records.len(), 1);
    let record = &records[0];
    assert_eq!(record.key_id, key_id);
    assert_eq!(record.relay_id, relay_id);
    assert_eq!(record.service_id, service_id);
    assert_eq!(record.peer_id, kp.get_peer_id().to_base58());
    assert_eq!(record.value, value);
    assert_eq!(record.set_by, kp.get_peer_id().to_base58());
}
}

View File

@ -0,0 +1,83 @@
/*
* Copyright 2021 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::defaults::{
TRUSTED_TIMESTAMP_FUNCTION_NAME, TRUSTED_TIMESTAMP_SERVICE_ID, TRUSTED_WEIGHT_FUNCTION_NAME,
TRUSTED_WEIGHT_SERVICE_ID,
};
use crate::error::ServiceError;
use crate::error::ServiceError::{
InvalidSetHostValueTetraplet, InvalidTimestampTetraplet, InvalidWeightTetraplet,
};
use crate::record::Record;
use marine_rs_sdk::CallParameters;
/// Check timestamps are generated on the current host with builtin ("peer" "timestamp_sec")
pub(crate) fn check_timestamp_tetraplets(
    call_parameters: &CallParameters,
    arg_number: usize,
) -> Result<(), ServiceError> {
    let tetraplet = call_parameters
        .tetraplets
        .get(arg_number)
        .and_then(|arg_tetraplets| arg_tetraplets.get(0))
        .ok_or_else(|| InvalidTimestampTetraplet(format!("{:?}", call_parameters.tetraplets)))?;

    // trusted iff produced by the timestamp builtin on this very host
    let trusted = tetraplet.service_id == TRUSTED_TIMESTAMP_SERVICE_ID
        && tetraplet.function_name == TRUSTED_TIMESTAMP_FUNCTION_NAME
        && tetraplet.peer_pk == call_parameters.host_id;

    if trusted {
        Ok(())
    } else {
        Err(InvalidTimestampTetraplet(format!("{:?}", tetraplet)))
    }
}
/// Check that the argument was produced by this service's own put_host_value
/// on the peer that owns the host record.
/// NOTE(review): the literal "aqua-dht" looks stale after the rename to
/// "registry" — confirm the deployed service id before/after this commit.
pub(crate) fn check_host_value_tetraplets(
    call_parameters: &CallParameters,
    arg_number: usize,
    host_value: &Record,
) -> Result<(), ServiceError> {
    let tetraplets = call_parameters
        .tetraplets
        .get(arg_number)
        .ok_or_else(|| InvalidSetHostValueTetraplet(format!("{:?}", call_parameters.tetraplets)))?;
    let tetraplet = tetraplets
        .get(0)
        .ok_or_else(|| InvalidSetHostValueTetraplet(format!("{:?}", call_parameters.tetraplets)))?;
    (tetraplet.service_id == "aqua-dht"
        && tetraplet.function_name == "put_host_value"
        && tetraplet.peer_pk == host_value.peer_id)
        .then(|| ())
        .ok_or_else(|| InvalidSetHostValueTetraplet(format!("{:?}", tetraplet)))
}
/// Check that weight number `index` of argument `arg_number` was produced by
/// the trusted weight service on the current host.
pub(crate) fn check_weight_tetraplets(
    call_parameters: &CallParameters,
    arg_number: usize,
    index: usize,
) -> Result<(), ServiceError> {
    let tetraplet = call_parameters
        .tetraplets
        .get(arg_number)
        .and_then(|arg_tetraplets| arg_tetraplets.get(index))
        .ok_or_else(|| InvalidWeightTetraplet(format!("{:?}", call_parameters.tetraplets)))?;

    let trusted = tetraplet.service_id == TRUSTED_WEIGHT_SERVICE_ID
        && tetraplet.function_name == TRUSTED_WEIGHT_FUNCTION_NAME
        && tetraplet.peer_pk == call_parameters.host_id;

    if trusted {
        Ok(())
    } else {
        Err(InvalidWeightTetraplet(format!("{:?}", tetraplet)))
    }
}