feat: support ipfs dag + e2e tests [NET-476] (#91)

This commit is contained in:
Aleksey Proshutisnkiy 2023-08-08 01:06:16 +03:00 committed by GitHub
parent 6bca793c25
commit 1dd30ba0ac
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 40877 additions and 59 deletions

2
.cargo/config Normal file
View File

@ -0,0 +1,2 @@
[registries]
fluence = { index = "git://crates.fluence.dev/index" }

View File

@ -1,11 +1,11 @@
services: services:
rust-peer: nox:
image: ${RUST_PEER_IMAGE:-fluencelabs/nox:unstable_minimal} image: ${NOX_IMAGE:-fluencelabs/nox:unstable_minimal}
ports: ports:
- 7771:7771 - 7771:7771
- 9991:9991 - 9991:9991
environment: environment:
- FLUENCE_ENV_AQUA_IPFS_EXTERNAL_API_MULTIADDR=/dns4/ipfs/tcp/5001 - FLUENCE_ENV_AQUA_IPFS_EXTERNAL_API_MULTIADDR=/ip4/127.0.0.1/tcp/5001
- FLUENCE_ENV_AQUA_IPFS_LOCAL_API_MULTIADDR=/dns4/ipfs/tcp/5001 - FLUENCE_ENV_AQUA_IPFS_LOCAL_API_MULTIADDR=/dns4/ipfs/tcp/5001
command: command:
- --aqua-pool-size=2 - --aqua-pool-size=2
@ -14,8 +14,6 @@ services:
- --local - --local
# 12D3KooWBM3SdXWqGaawQDGQ6JprtwswEg3FWGvGhmgmMez1vRbR # 12D3KooWBM3SdXWqGaawQDGQ6JprtwswEg3FWGvGhmgmMez1vRbR
- -k=hK62afickoeP2uZbmSkAYXxxqP8ozq16VRN7qfTP719EHC5V5tjrtW57BSjUr8GvsEXmJRbtejUWyPZ2rZMyQdq - -k=hK62afickoeP2uZbmSkAYXxxqP8ozq16VRN7qfTP719EHC5V5tjrtW57BSjUr8GvsEXmJRbtejUWyPZ2rZMyQdq
volumes:
- ../../aqua-ipfs:/.fluence/v1/builtins/aqua-ipfs
ipfs: ipfs:
image: ipfs/kubo:latest image: ipfs/kubo:latest

71
.github/workflows/e2e.yml vendored Normal file
View File

@ -0,0 +1,71 @@
name: "e2e"
on:
pull_request:
paths-ignore:
- "**.md"
- ".github/**"
- "!.github/workflows/e2e.yml"
- "!.github/workflows/snapshot.yml"
types:
- "labeled"
- "synchronize"
- "opened"
- "reopened"
push:
branches:
- "main"
paths-ignore:
- "**.md"
- ".github/**"
- "!.github/workflows/e2e.yml"
- "!.github/workflows/snapshot.yml"
concurrency:
group: "${{ github.workflow }}-${{ github.ref }}"
cancel-in-progress: true
jobs:
snapshot:
if: >
github.event_name == 'push' ||
contains(github.event.pull_request.labels.*.name, 'e2e')
name: "aqua-ipfs"
uses: ./.github/workflows/snapshot.yml
with:
ref: ${{ github.ref }}
nox:
needs:
- snapshot
uses: fluencelabs/nox/.github/workflows/build.yml@master
with:
cargo-dependencies: |
[
{
"package": "aqua-ipfs-distro",
"version": "=${{ needs.snapshot.outputs.cargo-version }}",
"manifest": "crates/system-services/Cargo.toml",
"registry": "fluence"
}
]
nox-snapshot:
name: "nox"
needs:
- nox
uses: fluencelabs/nox/.github/workflows/container.yml@master
with:
image-name: "docker.fluence.dev/aqua-ipfs"
flavour: "minimal"
nox-sha: "${{ needs.nox.outputs.nox-sha }}"
aqua-ipfs-tests:
name: "aqua-ipfs"
needs:
- nox-snapshot
uses: ./.github/workflows/tests.yml
with:
ref: ${{ github.ref }}
nox-image: "${{ needs.nox-snapshot.outputs.nox-image }}"

View File

@ -14,8 +14,42 @@ concurrency:
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
aqua-ipfs: tests:
uses: ./.github/workflows/tests.yml name: "cargo nextest"
runs-on: builder
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Setup marine
uses: fluencelabs/setup-marine@v1
- name: Install cargo-nextest
uses: baptiste0928/cargo-install@v1.3.0
with:
crate: cargo-nextest
version: 0.9.22
- name: Build aqua-ipfs
working-directory: ./service
run: ./build.sh
- name: Run cargo nextest
working-directory: ./service
env:
NEXTEST_TEST_THREADS: 10
run: cargo nextest run --release --all-features --no-fail-fast
- name: Run cargo clippy
uses: actions-rs/cargo@v1
with:
command: clippy
args: -Z unstable-options --all --manifest-path service/Cargo.toml
lints: lints:
name: Lints name: Lints

92
.github/workflows/snapshot.yml vendored Normal file
View File

@ -0,0 +1,92 @@
name: Build snapshot
on:
workflow_call:
inputs:
cargo-dependencies:
description: "Cargo dependencies map"
type: string
default: "null"
ref:
description: "git ref to checkout to"
type: string
default: "main"
outputs:
cargo-version:
description: "Cargo snapshot version"
value: ${{ jobs.snapshot.outputs.version }}
jobs:
snapshot:
name: "Build and test crates"
runs-on: builder
timeout-minutes: 60
outputs:
version: "${{ steps.snapshot.outputs.version }}"
permissions:
contents: read
id-token: write
steps:
- uses: actions/checkout@v3
with:
repository: fluencelabs/aqua-ipfs
ref: ${{ inputs.ref }}
- name: Setup rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
cache: false
- name: Set dependencies
if: inputs.cargo-dependencies != 'null'
uses: fluencelabs/github-actions/cargo-set-dependency@main
with:
dependencies: ${{ inputs.cargo-dependencies }}
path: service
- name: Download marine artifact
id: marine
uses: actions/download-artifact@v3
continue-on-error: true
with:
name: marine
path: ~/.local/bin
- name: Make marine executable
if: steps.marine.outcome == 'success'
run: chmod +x ~/.local/bin/marine
- name: Setup marine
if: steps.marine.outcome == 'failure'
uses: fluencelabs/setup-marine@v1
- name: Build and package aqua-ipfs
working-directory: ./service
run: ./build.sh
- name: Import secrets
uses: hashicorp/vault-action@v2.5.0
with:
url: https://vault.fluence.dev
path: jwt/github
role: ci
method: jwt
jwtGithubAudience: "https://github.com/fluencelabs"
jwtTtl: 300
exportToken: false
secrets: |
kv/cargo-registry/users/ci token | CARGO_REGISTRIES_FLUENCE_TOKEN
- name: Generate snapshot version
id: version
uses: fluencelabs/github-actions/generate-snapshot-id@main
- name: Publish crate snapshots
id: snapshot
uses: fluencelabs/github-actions/cargo-publish-snapshot@main
with:
id: ${{ steps.version.outputs.id }}
path: service/distro

View File

@ -2,6 +2,29 @@ name: Run tests with workflow_call
on: on:
workflow_call: workflow_call:
inputs:
fluence-env:
description: "Fluence environment to run tests against"
type: string
default: "local"
nox-image:
description: "nox image tag"
type: string
default: "fluencelabs/nox:unstable_minimal"
ref:
description: "GitHub ref to checkout to"
type: string
default: "main"
flox-version:
description: "@fluencelabs/flox version"
type: string
default: "null"
env:
CI: true
FORCE_COLOR: true
NOX_IMAGE: "${{ inputs.nox-image }}"
FLUENCE_ENV: "${{ inputs.fluence-env }}"
jobs: jobs:
aqua-ipfs: aqua-ipfs:
@ -28,6 +51,9 @@ jobs:
- name: Checkout aqua-ipfs - name: Checkout aqua-ipfs
uses: actions/checkout@v3 uses: actions/checkout@v3
with:
repository: fluencelabs/aqua-ipfs
ref: ${{ inputs.ref }}
- name: Setup Rust toolchain - name: Setup Rust toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1 uses: actions-rust-lang/setup-rust-toolchain@v1
@ -39,33 +65,83 @@ jobs:
working-directory: ./service working-directory: ./service
run: ./build.sh run: ./build.sh
# - name: Run cargo clippy - name: Setup node with self-hosted registry
# uses: actions-rs/cargo@v1 uses: actions/setup-node@v3
# with:
# command: clippy
# args: -Z unstable-options --all --manifest-path service/Cargo.toml
- name: Install cargo-nextest
uses: baptiste0928/cargo-install@v1.3.0
with: with:
crate: cargo-nextest node-version: "18"
version: 0.9.22 registry-url: "https://npm.fluence.dev"
cache: "npm"
cache-dependency-path: "aqua-tests/package-lock.json"
- name: Run cargo nextest - run: npm install
working-directory: ./service working-directory: aqua-tests
- name: Set flox version
if: inputs.flox-version != 'null'
uses: fluencelabs/github-actions/npm-set-dependency@main
with:
package: "@fluencelabs/cli"
version: ${{ inputs.flox-version }}
working-directory: aqua-tests
flags: "--save-dev"
- name: Login to DockerHub
uses: docker/login-action@v2
with:
registry: docker.fluence.dev
username: ${{ env.DOCKER_USERNAME }}
password: ${{ env.DOCKER_PASSWORD }}
- name: Pull nox image
run: docker pull $NOX_IMAGE
- name: Run nox network
uses: isbang/compose-action@v1.4.1
with:
compose-file: ".github/e2e/docker-compose.yml"
down-flags: "--volumes"
- name: Setup python
uses: actions/setup-python@v4
with:
python-version: "3.9"
cache: "pip"
cache-dependency-path: aqua-tests/requirements.txt
- name: Install python requirements
run: pip install -r requirements.txt
working-directory: aqua-tests
- name: Install fluence deps
env: env:
NEXTEST_RETRIES: 10 FLUENCE_USER_DIR: "${{ github.workspace }}/tmp/.fluence"
NEXTEST_TEST_THREADS: 10 run: npx fluence dependency npm install
run: cargo nextest run --release --all-features --no-fail-fast working-directory: aqua-tests
- name: Install ipfs - name: Install ipfs
uses: nahsi/setup-ipfs@v1 uses: nahsi/setup-ipfs@v1
- name: Create distribution package - name: Run aqua tests
run: ./builtin-package/package.sh env:
FLUENCE_USER_DIR: "${{ github.workspace }}/tmp/.fluence"
run: pytest -n auto test_aqua.py
working-directory: aqua-tests
- name: Upload aqua-ipfs - name: Print versions to check summary
uses: actions/upload-artifact@v3 if: always()
with: working-directory: aqua-tests
name: aqua-ipfs run: |
path: aqua-ipfs.tar.gz cat <<SNAPSHOT >> $GITHUB_STEP_SUMMARY
## Used versions
\`\`\`
$(npx fluence dep v)
\`\`\`
SNAPSHOT
- name: Dump nox logs
if: always()
uses: jwalton/gh-docker-logs@v2
- name: Remove tmp directory
if: always()
run: sudo rm -rf tmp

View File

@ -0,0 +1,16 @@
# yaml-language-server: $schema=schemas/project-secrets.json
# Defines project's secret keys that are used only in the scope of this particular Fluence project. You can manage project's keys using commands from `fluence key` group of commands
# Documentation: https://github.com/fluencelabs/fluence-cli/tree/main/docs/configs/project-secrets.md
version: 0
keyPairs:
[
{
peerId: 12D3KooWAtJ3RXGiW2WzZYUk7XxAEyfwLdqJHsPXmb7xd1dY7uZK,
secretKey: BNidntUryx+hxr7NK2z9nci23sMn3fURB6bTH1K2Ll4=,
publicKey: CAESIA/dWDmfLpI+PmldVAgQblramknRSyfivJ5x/Y0W6EQC,
name: test_put_get_dag
},
]

12
aqua-tests/.gitignore vendored Normal file
View File

@ -0,0 +1,12 @@
.idea
.DS_Store
.fluence/*
!.fluence/project-secrets.yaml
**/node_modules
**/target/
.repl_history
.vscode
src/ts/src/aqua
src/js/src/aqua
__pycache__
.pytest_cache

16
aqua-tests/README.md Normal file
View File

@ -0,0 +1,16 @@
# Aqua-IPFS API tests
## How to run
- `npm i`
- `pip3 install -r requirements.txt`
- `pip install -U pytest`
- `pytest -n auto`
## Adding new test
Before adding a new test, go to the `aqua-tests` dir first, then run `npm run secret`
to add a new key-pair for the new test.
Name it the same way the test function will be called (e.g. `test_create_resource`)
This is required for tests to run in parallel. Key-pairs could have been generated on the fly,
but it is a bit faster not to regenerate them on every test run.

6
aqua-tests/config.py Normal file
View File

@ -0,0 +1,6 @@
def get_local():
    """Return the multiaddrs of the local nox relay used in e2e runs."""
    local_relay = (
        '/ip4/127.0.0.1/tcp/9991/ws/p2p/'
        '12D3KooWBM3SdXWqGaawQDGQ6JprtwswEg3FWGvGhmgmMez1vRbR'
    )
    return [local_relay]

8
aqua-tests/fluence.yaml Normal file
View File

@ -0,0 +1,8 @@
# yaml-language-server: $schema=.fluence/schemas/fluence.json
# Defines Fluence Project, most importantly - what exactly you want to deploy and how. You can use `fluence init` command to generate a template for new Fluence project
# Documentation: https://github.com/fluencelabs/fluence-cli/tree/main/docs/configs/fluence.md
version: 2
aquaInputPath: src/aqua/main.aqua

View File

@ -0,0 +1,13 @@
// Print the default Fluence network peers (krasnodar/stage/testnet) as JSON
// to stdout, for consumption by the python test harness.
const env = require('@fluencelabs/fluence-network-environment')

const networks = {
  krasnodar: env.krasnodar,
  stage: env.stage,
  // note: the library exports `testNet`; the harness expects the key `testnet`
  testnet: env.testNet,
}

console.log(JSON.stringify(networks))

40201
aqua-tests/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

34
aqua-tests/package.json Normal file
View File

@ -0,0 +1,34 @@
{
"name": "aqua-tests",
"version": "1.0.0",
"description": "Aqua-IPFS tests",
"dependencies": {
"@fluencelabs/aqua-lib": "0.7.3",
"@fluencelabs/aqua-ipfs": "file:../aqua"
},
"scripts": {
"compile-aqua": "fluence aqua -i . -o ./target/typescript",
"generate-aqua": "../service/build.sh",
"build": "npm run compile-aqua",
"secret": "npx fluence key new"
},
"repository": {
"type": "git",
"url": "git+https://github.com/fluencelabs/aqua-ipfs.git",
"directory": "aqua"
},
"keywords": [
"aqua",
"fluence"
],
"author": "Fluence Labs",
"license": "MIT",
"bugs": {
"url": "https://github.com/fluencelabs/aqua-ipfs/issues"
},
"homepage": "https://github.com/fluencelabs/aqua-ipfs",
"devDependencies": {
"@fluencelabs/cli": "0.5.4",
"@fluencelabs/fluence-network-environment": "1.0.14"
}
}

View File

@ -0,0 +1,4 @@
delegator.py==0.1.1
pytest==7.3.0
pytest-xdist==3.2.1
pytest-repeat==0.9.1

View File

@ -0,0 +1,12 @@
aqua Main
import Dist from "@fluencelabs/aqua-lib/builtin.aqua"
import "@fluencelabs/aqua-ipfs/ipfs-api.aqua"
export get_external_api_multiaddr, load_blueprint_from_vault
-- Fetch dag `cid` on `node` into the particle vault via dag_get,
-- then load the resulting file as a blueprint through the Dist builtin.
-- NOTE(review): assumes the dag content is valid blueprint JSON — confirm.
func load_blueprint_from_vault(node: string, cid: string) -> string:
    on node:
        dag <- dag_get(node, cid)
        blueprint <- Dist.load_blueprint(dag.path)
    <- blueprint

120
aqua-tests/test_aqua.py Normal file
View File

@ -0,0 +1,120 @@
import delegator
import random
import json
import os
import tempfile
import inspect
from config import get_local
# Make sure fluence npm dependencies are installed before querying peers.
delegator.run("npx fluence dep npm i", block=True)
# Map of network name -> list of peer descriptors, as printed by getDefaultPeers.js.
default_peers = json.loads(delegator.run(f"node ./getDefaultPeers.js", block=True).out)
def get_relays():
    """Resolve the relay multiaddrs for the environment named by FLUENCE_ENV.

    Falls back to "testnet" when the variable is unset; "local" uses the
    hard-coded local relay list from config.py.
    """
    env = os.environ.get("FLUENCE_ENV")
    if env is None:
        env = "testnet"
    if env == "local":
        relays = get_local()
    else:
        relays = [p["multiaddr"] for p in default_peers[env]]
    assert len(relays) != 0, "No relays found"
    return relays
# Relay multiaddrs for the selected environment, plus their bare peer ids
# (a peer id is the last "/"-separated segment of a multiaddr).
relays = get_relays()
peer_ids = [relay.split("/")[-1] for relay in relays]
def get_random_list_item(ar):
    """Return a uniformly random element of the non-empty sequence `ar`."""
    last_index = len(ar) - 1
    return ar[random.randint(0, last_index)]
def get_random_relay():
    # Pick a random relay multiaddr from the module-level `relays` list.
    return get_random_list_item(relays)
def get_random_peer_id():
    # Pick a random bare peer id from the module-level `peer_ids` list.
    return get_random_list_item(peer_ids)
def run_aqua(func, args, relay=None):
    """Run an exported aqua function through the fluence CLI and parse its output.

    Args:
        func: name of the exported aqua function to call.
        args: positional arguments for the call; bound to single-letter data
            keys "a", "b", ... in the CLI invocation.
        relay: relay multiaddr to connect through; a random relay is chosen
            per call when omitted.

    Returns:
        The JSON-decoded result of the call, or the raw output string when
        the output is not valid JSON.
    """
    # NOTE(review): the original default `relay=get_random_relay()` was
    # evaluated once at import time, pinning every defaulted call to the same
    # relay. Choosing inside the body restores the intended per-call randomness.
    if relay is None:
        relay = get_random_relay()
    # "a": arg1, "b": arg2, ...
    data = {chr(97 + i): arg for (i, arg) in enumerate(args)}
    call = f"{func}(" + ", ".join([chr(97 + i) for i in range(0, len(args))]) + ")"
    # inspect.stack is used to recover the name of the currently running test
    # function; the -39th frame holds it under pytest-xdist, the -32nd when
    # running a single test (shorter stack). The key name selects the matching
    # key-pair from project-secrets.yaml.
    try:
        test_name = inspect.stack()[-39][3]
    except IndexError:
        # when running one test at a time, the stack is shorter so we need a different index
        test_name = inspect.stack()[-32][3]
    command = f"npx fluence run -k {test_name} --relay {relay} -f '{call}' --data '{json.dumps(data)}' --import 'node_modules' --quiet --particle-id"
    print(command)
    c = delegator.run(command, block=True)
    lines = c.out.splitlines()
    # First output line is the particle id (from --particle-id); the rest is the result.
    particle_id = lines[0] if len(lines) != 0 else ""
    if len(c.err.strip()) != 0:
        print(f"{particle_id}\n{c.err}")
    result = "\n".join(lines[1:])
    try:
        parsed = json.loads(result)
        print(parsed)
        return parsed
    except json.JSONDecodeError:
        # Not JSON — return the raw output so the caller can inspect it.
        print(result)
        return result
def put_dag(api, data):
    """Store `data` as an IPFS dag via the node at `api`; return the CID.

    Writes `data` to a temp file and shells out to `ipfs dag put`.
    """
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(data.encode())
        tmp.flush()
        result = delegator.run(f"ipfs --api {api} dag put {tmp.name}", block=True)
        stderr = result.err.strip()
        if len(stderr) != 0:
            print(f"dag put error: {result.err}")
        return result.out.strip()
def test_put_get_dag():
    """e2e: put a blueprint-shaped dag to IPFS, then load it back via aqua-ipfs."""
    # Blueprint-like dag: a name plus two module CIDs as IPLD links ("/").
    dag = """
    {
        "name": "ipfs_pure",
        "dependencies": [
            {
                "/": "bafkreibrmbfv7ab4dokljanddvq5nah66cdody2biusqgqlfqduwn4avdi"
            },
            {
                "/": "bafybeicovoqrw75mskauoaknyxpla7xadtv5m2lphlrtjdj7dlacm6wawi"
            }
        ]
    }
    """
    relay_multiaddr = get_random_relay()
    relay_peer_id = relay_multiaddr.split("/")[-1]
    # Ask the relay for the multiaddr of its externally reachable IPFS API.
    ext_api_endpoint = run_aqua(
        "get_external_api_multiaddr", [relay_peer_id], relay=relay_multiaddr
    )
    assert ext_api_endpoint["success"] == True
    # Put the dag directly through the node's IPFS API...
    cid = put_dag(ext_api_endpoint["multiaddr"], dag)
    assert cid != ""
    # ...then read it back through aqua-ipfs dag_get + Dist.load_blueprint.
    blueprint = run_aqua(
        "load_blueprint_from_vault", [relay_peer_id, cid], relay=relay_multiaddr
    )
    assert blueprint["name"] == "ipfs_pure"
    assert (
        blueprint["dependencies"][0]["/"]
        == "bafkreibrmbfv7ab4dokljanddvq5nah66cdody2biusqgqlfqduwn4avdi"
    )
    assert (
        blueprint["dependencies"][1]["/"]
        == "bafybeicovoqrw75mskauoaknyxpla7xadtv5m2lphlrtjdj7dlacm6wawi"
    )

View File

@ -52,12 +52,31 @@ func put(node: PeerId, path: string) -> IpfsPutResult:
result <- Ipfs.put(path) result <- Ipfs.put(path)
<- result <- result
-- Upload file `path` as a dag to IPFS node running on `node`
-- path should exist & be available to `aqua-ipfs`
func dag_put(node: PeerId, path: string) -> IpfsPutResult:
on node:
result <- Ipfs.dag_put(path)
<- result
-- Returns file path of the dag `cid` from local cache of IPFS node `node`
func dag_get(node: PeerId, cid: CID) -> IpfsGetResult:
on node:
result <- Ipfs.dag_get(cid)
<- result
-- Download file `cid` from IPFS node `from` and save it to `node` -- Download file `cid` from IPFS node `from` and save it to `node`
func get_from(node: PeerId, cid: CID, from: Multiaddr) -> IpfsGetResult: func get_from(node: PeerId, cid: CID, from: Multiaddr) -> IpfsGetResult:
on node: on node:
result <- Ipfs.get_from(cid, from) result <- Ipfs.get_from(cid, from)
<- result <- result
-- Download dag `cid` from IPFS node `from` and save it to `node`
func dag_get_from(node: PeerId, cid: CID, from: Multiaddr) -> IpfsGetResult:
on node:
result <- Ipfs.dag_get_from(cid, from)
<- result
-- Return contents of the file `cid` from IPFS node `from` -- Return contents of the file `cid` from IPFS node `from`
func cat_from(node: PeerId, cid: CID, from: Multiaddr) -> IpfsCatResult: func cat_from(node: PeerId, cid: CID, from: Multiaddr) -> IpfsCatResult:
on node: on node:

View File

@ -28,6 +28,9 @@ service Ipfs("aqua-ipfs"):
cat(hash: string) -> IpfsCatResult cat(hash: string) -> IpfsCatResult
cat_from(hash: string, external_multiaddr: string) -> IpfsCatResult cat_from(hash: string, external_multiaddr: string) -> IpfsCatResult
connect(multiaddr: string) -> IpfsResult connect(multiaddr: string) -> IpfsResult
dag_get(hash: string) -> IpfsGetResult
dag_get_from(hash: string, external_multiaddr: string) -> IpfsGetResult
dag_put(file_path: string) -> IpfsPutResult
get(hash: string) -> IpfsGetResult get(hash: string) -> IpfsGetResult
get_external_api_multiaddr() -> IpfsMultiaddrResult get_external_api_multiaddr() -> IpfsMultiaddrResult
get_external_swarm_multiaddr() -> IpfsMultiaddrResult get_external_swarm_multiaddr() -> IpfsMultiaddrResult

View File

@ -1,6 +1,6 @@
pub const IPFS_EFFECTOR: &'static [u8] = include_bytes!("../ipfs-service/ipfs_effector.wasm"); pub const IPFS_EFFECTOR: &[u8] = include_bytes!("../ipfs-service/ipfs_effector.wasm");
pub const IPFS_FACADE: &'static [u8] = include_bytes!("../ipfs-service/ipfs_pure.wasm"); pub const IPFS_FACADE: &[u8] = include_bytes!("../ipfs-service/ipfs_pure.wasm");
pub const CONFIG: &'static [u8] = include_bytes!("../ipfs-service/Config.toml"); pub const CONFIG: &[u8] = include_bytes!("../ipfs-service/Config.toml");
pub mod build_info { pub mod build_info {
include!(concat!(env!("OUT_DIR"), "/built.rs")); include!(concat!(env!("OUT_DIR"), "/built.rs"));

View File

@ -21,12 +21,16 @@ use marine_rs_sdk::marine;
use marine_rs_sdk::module_manifest; use marine_rs_sdk::module_manifest;
use marine_rs_sdk::MountedBinaryResult; use marine_rs_sdk::MountedBinaryResult;
use marine_rs_sdk::WasmLoggerBuilder; use marine_rs_sdk::WasmLoggerBuilder;
use std::fs;
use itertools::Itertools; use itertools::Itertools;
use types::{IpfsCatResult, IpfsGetPeerIdResult, IpfsPutResult, IpfsResult}; use types::{IpfsCatResult, IpfsGetPeerIdResult, IpfsPutResult, IpfsResult};
module_manifest!(); module_manifest!();
/// Default chunk size for `ipfs add` command to produce stable CIDs.
const CHUCK_SIZE: usize = 262144;
pub fn main() { pub fn main() {
WasmLoggerBuilder::new() WasmLoggerBuilder::new()
.with_log_level(log::LevelFilter::Info) .with_log_level(log::LevelFilter::Info)
@ -64,8 +68,6 @@ fn make_cmd_args(args: Vec<String>, api_multiaddr: String, timeout_sec: u64) ->
#[marine] #[marine]
pub fn connect(multiaddr: String, api_multiaddr: String, timeout_sec: u64) -> IpfsResult { pub fn connect(multiaddr: String, api_multiaddr: String, timeout_sec: u64) -> IpfsResult {
log::info!("connect called with multiaddr {}", multiaddr);
let args = vec![String::from("swarm"), String::from("connect"), multiaddr]; let args = vec![String::from("swarm"), String::from("connect"), multiaddr];
let cmd = make_cmd_args(args, api_multiaddr, timeout_sec); let cmd = make_cmd_args(args, api_multiaddr, timeout_sec);
@ -75,8 +77,6 @@ pub fn connect(multiaddr: String, api_multiaddr: String, timeout_sec: u64) -> Ip
/// Put file from specified path to IPFS and return its hash. /// Put file from specified path to IPFS and return its hash.
#[marine] #[marine]
pub fn put(file_path: String, api_multiaddr: String, timeout_sec: u64) -> IpfsPutResult { pub fn put(file_path: String, api_multiaddr: String, timeout_sec: u64) -> IpfsPutResult {
log::info!("put called with file path {}", file_path);
if !std::path::Path::new(&file_path).exists() { if !std::path::Path::new(&file_path).exists() {
return IpfsPutResult { return IpfsPutResult {
success: false, success: false,
@ -89,33 +89,66 @@ pub fn put(file_path: String, api_multiaddr: String, timeout_sec: u64) -> IpfsPu
String::from("add"), String::from("add"),
String::from("-Q"), String::from("-Q"),
inject_vault_host_path(file_path), inject_vault_host_path(file_path),
String::from("--cid-version=1"),
format!("--chunker=size-{}", CHUCK_SIZE),
]; ];
let cmd = make_cmd_args(args, api_multiaddr, timeout_sec); let cmd = make_cmd_args(args, api_multiaddr, timeout_sec);
log::info!("ipfs put args {:?}", cmd);
run_ipfs(cmd).map(|res| res.trim().to_string()).into() run_ipfs(cmd).map(|res| res.trim().to_string()).into()
} }
/// Get file by provided hash from IPFS, save it to a `file_path, and return that path /// Put dag from specified path to IPFS and return its hash.
#[marine] #[marine]
pub fn get(hash: String, file_path: String, api_multiaddr: String, timeout_sec: u64) -> IpfsResult { pub fn dag_put(file_path: String, api_multiaddr: String, timeout_sec: u64) -> IpfsPutResult {
log::info!("get called with hash {}", hash); if !std::path::Path::new(&file_path).exists() {
return IpfsPutResult {
success: false,
error: format!("path {} doesn't exist", file_path),
hash: "".to_string(),
};
}
let args = vec![
String::from("dag"),
String::from("put"),
inject_vault_host_path(file_path),
];
let cmd = make_cmd_args(args, api_multiaddr, timeout_sec);
run_ipfs(cmd).map(|res| res.trim().to_string()).into()
}
/// Get file by provided hash from IPFS, save it to a `file_path`, and return that path
#[marine]
pub fn get(hash: String, file_path: &str, api_multiaddr: String, timeout_sec: u64) -> IpfsResult {
let args = vec![ let args = vec![
String::from("get"), String::from("get"),
String::from("-o"), String::from("-o"),
inject_vault_host_path(file_path), inject_vault_host_path(file_path.to_string()),
hash, hash,
]; ];
let cmd = make_cmd_args(args, api_multiaddr, timeout_sec); let cmd = make_cmd_args(args, api_multiaddr, timeout_sec);
log::info!("ipfs get args {:?}", cmd); run_ipfs(cmd).map(drop).into()
}
run_ipfs(cmd) /// Get dag by provided hash from IPFS, save it to a `file_path`, and return that path
.map(|output| { #[marine]
log::info!("ipfs get output: {}", output); pub fn dag_get(
}) hash: String,
file_path: &str,
api_multiaddr: String,
timeout_sec: u64,
) -> IpfsResult {
let args = vec![String::from("dag"), String::from("get"), hash];
let cmd = make_cmd_args(args, api_multiaddr, timeout_sec);
let result: Result<()> = try {
let dag = run_ipfs(cmd)?;
fs::write(inject_vault_host_path(file_path.to_string()), dag)?
};
result
.map(|_| ())
.map_err(|e| eyre::eyre!("dag_get error: {:?}", e))
.into() .into()
} }
@ -143,13 +176,9 @@ pub fn get_peer_id(api_multiaddr: String, timeout_sec: u64) -> IpfsGetPeerIdResu
/// Cat file by provided hash from IPFS, /// Cat file by provided hash from IPFS,
#[marine] #[marine]
pub fn cat(hash: String, api_multiaddr: String, timeout_sec: u64) -> IpfsCatResult { pub fn cat(hash: String, api_multiaddr: String, timeout_sec: u64) -> IpfsCatResult {
log::info!("cat called with hash {}", hash);
let args = vec![String::from("cat"), hash]; let args = vec![String::from("cat"), hash];
let cmd = make_cmd_args(args, api_multiaddr, timeout_sec); let cmd = make_cmd_args(args, api_multiaddr, timeout_sec);
log::info!("ipfs cat args {:?}", cmd);
run_ipfs(cmd) run_ipfs(cmd)
.map_err(|e| eyre::eyre!("ipfs cat error: {:?}", e)) .map_err(|e| eyre::eyre!("ipfs cat error: {:?}", e))
.into() .into()

View File

@ -128,7 +128,6 @@ pub fn connect(multiaddr: String) -> IpfsResult {
#[marine] #[marine]
pub fn put(file_path: String) -> IpfsPutResult { pub fn put(file_path: String) -> IpfsPutResult {
log::info!("put called with {:?}", file_path);
let timeout = load_config().timeout; let timeout = load_config().timeout;
let local_maddr = load_local_api_multiaddr().map(|m| m.to_string()); let local_maddr = load_local_api_multiaddr().map(|m| m.to_string());
if local_maddr.is_ok() { if local_maddr.is_ok() {
@ -138,6 +137,17 @@ pub fn put(file_path: String) -> IpfsPutResult {
} }
} }
#[marine]
/// Put the dag at `file_path` to the local IPFS node; returns its hash.
///
/// Any failure to resolve the local API multiaddr is forwarded unchanged.
pub fn dag_put(file_path: String) -> IpfsPutResult {
    let timeout = load_config().timeout;
    // Single `match` instead of `is_ok()` + `unwrap()` (clippy: unnecessary_unwrap).
    match load_local_api_multiaddr().map(|m| m.to_string()) {
        Ok(local_maddr) => ipfs_dag_put(file_path, local_maddr, timeout),
        err => err.into(),
    }
}
#[marine] #[marine]
pub fn get(hash: String) -> IpfsGetResult { pub fn get(hash: String) -> IpfsGetResult {
let local_maddr = load_local_api_multiaddr().map(|m| m.to_string()); let local_maddr = load_local_api_multiaddr().map(|m| m.to_string());
@ -150,7 +160,6 @@ pub fn get(hash: String) -> IpfsGetResult {
#[marine] #[marine]
pub fn get_from(hash: String, external_multiaddr: String) -> IpfsGetResult { pub fn get_from(hash: String, external_multiaddr: String) -> IpfsGetResult {
log::info!("get from called with hash: {}", hash);
let config = load_config(); let config = load_config();
let timeout = config.timeout; let timeout = config.timeout;
@ -161,7 +170,38 @@ pub fn get_from(hash: String, external_multiaddr: String) -> IpfsGetResult {
let particle_id = marine_rs_sdk::get_call_parameters().particle_id; let particle_id = marine_rs_sdk::get_call_parameters().particle_id;
let particle_vault_path = format!("/tmp/vault/{}", particle_id); let particle_vault_path = format!("/tmp/vault/{}", particle_id);
let path = format!("{}/{}", particle_vault_path, hash); let path = format!("{}/{}", particle_vault_path, hash);
let get_result = ipfs_get(hash, path.clone(), external_multiaddr, timeout); let get_result = ipfs_get(hash, &path, external_multiaddr, timeout);
if get_result.success {
Ok(path).into()
} else {
Err(eyre::eyre!(get_result.error)).into()
}
}
/// Get dag `hash` from the local IPFS node; returns the vault path it was saved to.
///
/// Any failure to resolve the local API multiaddr is forwarded unchanged.
#[marine]
pub fn dag_get(hash: String) -> IpfsGetResult {
    // Single `match` instead of `is_ok()` + `unwrap()` (clippy: unnecessary_unwrap).
    match load_local_api_multiaddr().map(|m| m.to_string()) {
        Ok(local_maddr) => dag_get_from(hash, local_maddr),
        err => err.into(),
    }
}
#[marine]
pub fn dag_get_from(hash: String, external_multiaddr: String) -> IpfsGetResult {
let config = load_config();
let timeout = config.timeout;
if Multiaddr::from_str(&external_multiaddr).is_err() {
return Err(eyre::eyre!("invalid multiaddr: {}", external_multiaddr)).into();
}
let particle_id = marine_rs_sdk::get_call_parameters().particle_id;
let particle_vault_path = format!("/tmp/vault/{}", particle_id);
let path = format!("{}/{}", particle_vault_path, hash);
let get_result = ipfs_dag_get(hash, &path, external_multiaddr, timeout);
if get_result.success { if get_result.success {
Ok(path).into() Ok(path).into()
@ -182,7 +222,6 @@ pub fn cat(hash: String) -> IpfsCatResult {
#[marine] #[marine]
pub fn cat_from(hash: String, external_multiaddr: String) -> IpfsCatResult { pub fn cat_from(hash: String, external_multiaddr: String) -> IpfsCatResult {
log::info!("cat_from called with hash: {}", hash);
let config = load_config(); let config = load_config();
let timeout = config.timeout; let timeout = config.timeout;
@ -362,19 +401,32 @@ extern "C" {
#[link_name = "put"] #[link_name = "put"]
pub fn ipfs_put(file_path: String, api_multiaddr: String, timeout_sec: u64) -> IpfsPutResult; pub fn ipfs_put(file_path: String, api_multiaddr: String, timeout_sec: u64) -> IpfsPutResult;
/// Put provided dag to ipfs, return ipfs hash of the dag.
#[link_name = "dag_put"]
pub fn ipfs_dag_put(dag: String, api_multiaddr: String, timeout_sec: u64) -> IpfsPutResult;
/// Get file from ipfs by hash. /// Get file from ipfs by hash.
#[link_name = "get"] #[link_name = "get"]
pub fn ipfs_get( pub fn ipfs_get(
hash: String, hash: String,
file_path: String, file_path: &str,
api_multiaddr: String, api_multiaddr: String,
timeout_sec: u64, timeout_sec: u64,
) -> IpfsResult; ) -> IpfsResult;
#[link_name = "get_peer_id"] // Get dag from ipfs by hash.
pub fn ipfs_get_peer_id(local_multiaddr: String, timeout_sec: u64) -> IpfsGetPeerIdResult; #[link_name = "dag_get"]
pub fn ipfs_dag_get(
hash: String,
file_path: &str,
api_multiaddr: String,
timeout_sec: u64,
) -> IpfsResult;
/// Get file from ipfs by hash. /// Get file from ipfs by hash.
#[link_name = "cat"] #[link_name = "cat"]
pub fn ipfs_cat(hash: String, api_multiaddr: String, timeout_sec: u64) -> IpfsCatResult; pub fn ipfs_cat(hash: String, api_multiaddr: String, timeout_sec: u64) -> IpfsCatResult;
#[link_name = "get_peer_id"]
pub fn ipfs_get_peer_id(local_multiaddr: String, timeout_sec: u64) -> IpfsGetPeerIdResult;
} }